mirror of https://github.com/goharbor/harbor.git (synced 2024-11-27 12:46:03 +01:00)

Merge pull request #7452 from steven-zou/fix_issues_for_jobservice

Fix issues for jobservice

This commit is contained in: commit 9bd2de3e35

src/Gopkg.lock (generated, 16 lines changed)
@@ -440,16 +440,25 @@
 	version = "v1.0.1"

 [[projects]]
-	digest = "1:994df93785d966f82180e17a0857fa53f7155cddca3898ad00b27e8d4481e4ae"
+	digest = "1:ac83cf90d08b63ad5f7e020ef480d319ae890c208f8524622a2f3136e2686b02"
+	name = "github.com/stretchr/objx"
+	packages = ["."]
+	pruneopts = "UT"
+	revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c"
+	version = "v0.1.1"
+
+[[projects]]
+	digest = "1:288e2ba4192b77ec619875ab54d82e2179ca8978e8baa690dcb4343a4a1f4da7"
 	name = "github.com/stretchr/testify"
 	packages = [
 		"assert",
 		"mock",
 		"require",
+		"suite",
 	]
 	pruneopts = "UT"
-	revision = "b91bfb9ebec76498946beb6af7c0230c7cc7ba6c"
-	version = "v1.2.0"
+	revision = "ffdc059bfe9ce6a4e144ba849dbedead332c6053"
+	version = "v1.3.0"

 [[projects]]
 	digest = "1:ab3259b9f5008a18ff8c1cc34623eccce354f3a9faf5b409983cd6717d64b40b"

@@ -749,6 +758,7 @@
 	"github.com/pkg/errors",
 	"github.com/robfig/cron",
 	"github.com/stretchr/testify/assert",
 	"github.com/stretchr/testify/mock",
 	"github.com/stretchr/testify/require",
+	"github.com/stretchr/testify/suite",
 	"golang.org/x/crypto/pbkdf2",
@@ -52,7 +52,7 @@ ignored = ["github.com/goharbor/harbor/tests*"]
 	name = "github.com/go-sql-driver/mysql"
 	version = "=1.3.0"

-[[constraint]]
+[[override]]
 	name = "github.com/mattn/go-sqlite3"
 	version = "=1.6.0"

@@ -66,7 +66,7 @@ ignored = ["github.com/goharbor/harbor/tests*"]

 [[constraint]]
 	name = "github.com/stretchr/testify"
-	version = "=1.2.0"
+	version = "=1.3.0"

 [[constraint]]
 	name = "github.com/gorilla/handlers"
@@ -65,7 +65,7 @@ func (c *Controller) getIndexYaml(namespaces []string) (*helm_repo.IndexFile, er
 	// Retrieve index.yaml for repositories
 	workerPool := make(chan struct{}, initialItemCount)

-	// Add initial tokens to the pool
+	// Add initial tokens to the worker
 	for i := 0; i < initialItemCount; i++ {
 		workerPool <- struct{}{}
 	}

@@ -103,7 +103,7 @@ LOOP:
 		go func(ns string) {
 			defer func() {
 				waitGroup.Done() // done
-				// Return the worker back to the pool
+				// Return the worker back to the worker
 				workerPool <- struct{}{}
 			}()
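The two hunks above only rename comments; the underlying mechanism is a buffered channel used as a semaphore of worker tokens. A minimal, self-contained sketch of that pattern follows; the names and the namespaces slice are illustrative, not Harbor's actual code.

package main

import (
	"fmt"
	"sync"
)

func main() {
	const initialItemCount = 3 // max concurrent fetches, as in the hunk above
	workerPool := make(chan struct{}, initialItemCount)

	// Add initial tokens to the pool.
	for i := 0; i < initialItemCount; i++ {
		workerPool <- struct{}{}
	}

	var waitGroup sync.WaitGroup
	namespaces := []string{"ns1", "ns2", "ns3", "ns4", "ns5"}
	for _, ns := range namespaces {
		<-workerPool // take a token; blocks when all tokens are in use
		waitGroup.Add(1)
		go func(ns string) {
			defer func() {
				waitGroup.Done()
				workerPool <- struct{}{} // return the token to the pool
			}()
			fmt.Println("fetching index.yaml for", ns)
		}(ns)
	}
	waitGroup.Wait()
}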
@@ -7,10 +7,6 @@ const (
 	ImageScanAllJob = "IMAGE_SCAN_ALL"
 	// ImageGC the name of image garbage collection job in job service
 	ImageGC = "IMAGE_GC"
-	// Replication : the name of the replication job in job service
-	Replication = "REPLICATION"
-	// ReplicationScheduler : the name of the replication scheduler job in job service
-	ReplicationScheduler = "IMAGE_REPLICATE"

 	// JobKindGeneric : Kind of generic job
 	JobKindGeneric = "Generic"
@@ -54,7 +54,7 @@ type JobPoolStats struct {
 	Pools []*JobPoolStatsData `json:"worker_pools"`
 }

-// JobPoolStatsData represent the healthy and status of the worker pool.
+// JobPoolStatsData represent the healthy and status of the worker worker.
 type JobPoolStatsData struct {
 	WorkerPoolID string `json:"worker_pool_id"`
 	StartedAt    int64  `json:"started_at"`
@@ -192,7 +192,7 @@ func (dc *defaultClient) UpdateConfig(cfg *ClientConfig) error {
 	pool := x509.NewCertPool()
 	// Do not throw error if the certificate is malformed, so we can put a place holder.
 	if ok := pool.AppendCertsFromPEM(content); !ok {
-		log.Warningf("Failed to append certificate to cert pool, cert path: %s", cfg.CARootPath)
+		log.Warningf("Failed to append certificate to cert worker, cert path: %s", cfg.CARootPath)
 	} else {
 		tc.RootCAs = pool
 	}
@@ -116,7 +116,7 @@ func initProjectManager() error {
 	}
 	pool := x509.NewCertPool()
 	if ok := pool.AppendCertsFromPEM(content); !ok {
-		return fmt.Errorf("failed to append cert content into cert pool")
+		return fmt.Errorf("failed to append cert content into cert worker")
 	}
 	AdmiralClient = &http.Client{
 		Transport: &http.Transport{
@@ -20,8 +20,8 @@ import (
 	"net/http"
 	"strings"

+	"github.com/goharbor/harbor/src/jobservice/common/utils"
 	"github.com/goharbor/harbor/src/jobservice/config"
-	"github.com/goharbor/harbor/src/jobservice/utils"
 )

 const (
@@ -16,7 +16,6 @@ package api

 import (
 	"encoding/json"
-	"fmt"
 	"io/ioutil"
 	"net/http"
 	"os"

@@ -24,13 +23,19 @@ import (

 	"github.com/gorilla/mux"

+	"fmt"
+	"github.com/goharbor/harbor/src/jobservice/common/query"
+	"github.com/goharbor/harbor/src/jobservice/common/utils"
 	"github.com/goharbor/harbor/src/jobservice/core"
 	"github.com/goharbor/harbor/src/jobservice/errs"
+	"github.com/goharbor/harbor/src/jobservice/job"
 	"github.com/goharbor/harbor/src/jobservice/logger"
-	"github.com/goharbor/harbor/src/jobservice/models"
-	"github.com/goharbor/harbor/src/jobservice/opm"
+	"github.com/pkg/errors"
+	"strconv"
 )

 const totalHeaderKey = "Total-Count"

 // Handler defines approaches to handle the http requests.
 type Handler interface {
 	// HandleLaunchJobReq is used to handle the job submission request.

@@ -47,6 +52,12 @@ type Handler interface {

 	// HandleJobLogReq is used to handle the request of getting job logs
 	HandleJobLogReq(w http.ResponseWriter, req *http.Request)
+
+	// HandleJobLogReq is used to handle the request of getting periodic executions
+	HandlePeriodicExecutions(w http.ResponseWriter, req *http.Request)
+
+	// HandleScheduledJobs is used to handle the request of getting pending scheduled jobs
+	HandleScheduledJobs(w http.ResponseWriter, req *http.Request)
 }

 // DefaultHandler is the default request handler which implements the Handler interface.

@@ -63,10 +74,6 @@ func NewDefaultHandler(ctl core.Interface) *DefaultHandler {

 // HandleLaunchJobReq is implementation of method defined in interface 'Handler'
 func (dh *DefaultHandler) HandleLaunchJobReq(w http.ResponseWriter, req *http.Request) {
-	if !dh.preCheck(w, req) {
-		return
-	}
-
 	data, err := ioutil.ReadAll(req.Body)
 	if err != nil {
 		dh.handleError(w, req, http.StatusInternalServerError, errs.ReadRequestBodyError(err))

@@ -74,8 +81,8 @@ func (dh *DefaultHandler) HandleLaunchJobReq(w http.ResponseWriter, req *http.Re
 	}

 	// unmarshal data
-	jobReq := models.JobRequest{}
-	if err = json.Unmarshal(data, &jobReq); err != nil {
+	jobReq := &job.Request{}
+	if err = json.Unmarshal(data, jobReq); err != nil {
 		dh.handleError(w, req, http.StatusInternalServerError, errs.HandleJSONDataError(err))
 		return
 	}

@@ -83,13 +90,19 @@ func (dh *DefaultHandler) HandleLaunchJobReq(w http.ResponseWriter, req *http.Re
 	// Pass request to the controller for the follow-up.
 	jobStats, err := dh.controller.LaunchJob(jobReq)
 	if err != nil {
-		if errs.IsConflictError(err) {
-			dh.handleError(w, req, http.StatusConflict, err)
+		code := http.StatusInternalServerError
+		if errs.IsBadRequestError(err) {
+			// Bad request
+			code = http.StatusBadRequest
+		} else if errs.IsConflictError(err) {
+			// Conflict error
+			code = http.StatusConflict
 		} else {
-			dh.handleError(w, req, http.StatusInternalServerError, errs.LaunchJobError(err))
+			// General error
+			err = errs.LaunchJobError(err)
 		}
+
+		dh.handleError(w, req, code, err)
 		return
 	}
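The rewrite above replaces per-branch handleError calls with one typed-error-to-status mapping and a single exit point. A hedged, standalone sketch of that mapping pattern; the sentinel errors stand in for Harbor's errs helpers and are not its real API.

package main

import (
	"errors"
	"fmt"
	"net/http"
)

// Stand-ins for errs.IsBadRequestError / errs.IsConflictError.
var errBadRequest = errors.New("bad request")
var errConflict = errors.New("conflict")

// statusFor maps a typed error to an HTTP status code, defaulting to 500,
// mirroring the single handleError exit in the hunk above.
func statusFor(err error) int {
	code := http.StatusInternalServerError
	if errors.Is(err, errBadRequest) {
		code = http.StatusBadRequest
	} else if errors.Is(err, errConflict) {
		code = http.StatusConflict
	}
	return code
}

func main() {
	fmt.Println(statusFor(errConflict))     // 409
	fmt.Println(statusFor(errBadRequest))   // 400
	fmt.Println(statusFor(errors.New("x"))) // 500
}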
@@ -98,22 +111,20 @@ func (dh *DefaultHandler) HandleLaunchJobReq(w http.ResponseWriter, req *http.Re

 // HandleGetJobReq is implementation of method defined in interface 'Handler'
 func (dh *DefaultHandler) HandleGetJobReq(w http.ResponseWriter, req *http.Request) {
-	if !dh.preCheck(w, req) {
-		return
-	}
-
 	vars := mux.Vars(req)
 	jobID := vars["job_id"]

 	jobStats, err := dh.controller.GetJob(jobID)
 	if err != nil {
 		code := http.StatusInternalServerError
-		backErr := errs.GetJobStatsError(err)
 		if errs.IsObjectNotFoundError(err) {
 			code = http.StatusNotFound
-			backErr = err
+		} else if errs.IsBadRequestError(err) {
+			code = http.StatusBadRequest
+		} else {
+			err = errs.GetJobStatsError(err)
 		}
-		dh.handleError(w, req, code, backErr)
+		dh.handleError(w, req, code, err)
 		return
 	}

@@ -122,10 +133,6 @@ func (dh *DefaultHandler) HandleGetJobReq(w http.ResponseWriter, req *http.Reque

 // HandleJobActionReq is implementation of method defined in interface 'Handler'
 func (dh *DefaultHandler) HandleJobActionReq(w http.ResponseWriter, req *http.Request) {
-	if !dh.preCheck(w, req) {
-		return
-	}
-
 	vars := mux.Vars(req)
 	jobID := vars["job_id"]

@@ -136,48 +143,30 @@ func (dh *DefaultHandler) HandleJobActionReq(w http.ResponseWriter, req *http.Re
 	}

 	// unmarshal data
-	jobActionReq := models.JobActionRequest{}
-	if err = json.Unmarshal(data, &jobActionReq); err != nil {
+	jobActionReq := &job.ActionRequest{}
+	if err = json.Unmarshal(data, jobActionReq); err != nil {
 		dh.handleError(w, req, http.StatusInternalServerError, errs.HandleJSONDataError(err))
 		return
 	}

-	switch jobActionReq.Action {
-	case opm.CtlCommandStop:
-		if err := dh.controller.StopJob(jobID); err != nil {
-			code := http.StatusInternalServerError
-			backErr := errs.StopJobError(err)
-			if errs.IsObjectNotFoundError(err) {
-				code = http.StatusNotFound
-				backErr = err
-			}
-			dh.handleError(w, req, code, backErr)
-			return
-		}
-	case opm.CtlCommandCancel:
-		if err := dh.controller.CancelJob(jobID); err != nil {
-			code := http.StatusInternalServerError
-			backErr := errs.CancelJobError(err)
-			if errs.IsObjectNotFoundError(err) {
-				code = http.StatusNotFound
-				backErr = err
-			}
-			dh.handleError(w, req, code, backErr)
-			return
-		}
-	case opm.CtlCommandRetry:
-		if err := dh.controller.RetryJob(jobID); err != nil {
-			code := http.StatusInternalServerError
-			backErr := errs.RetryJobError(err)
-			if errs.IsObjectNotFoundError(err) {
-				code = http.StatusNotFound
-				backErr = err
-			}
-			dh.handleError(w, req, code, backErr)
-			return
-		}
-	default:
-		dh.handleError(w, req, http.StatusNotImplemented, errs.UnknownActionNameError(fmt.Errorf("%s", jobID)))
+	// Only support stop command now
+	cmd := job.OPCommand(jobActionReq.Action)
+	if !cmd.IsStop() {
+		dh.handleError(w, req, http.StatusNotImplemented, errs.UnknownActionNameError(errors.Errorf("command: %s", jobActionReq.Action)))
+		return
+	}
+
+	// Stop job
+	if err := dh.controller.StopJob(jobID); err != nil {
+		code := http.StatusInternalServerError
+		if errs.IsObjectNotFoundError(err) {
+			code = http.StatusNotFound
+		} else if errs.IsBadRequestError(err) {
+			code = http.StatusBadRequest
+		} else {
+			err = errs.StopJobError(err)
+		}
+
+		dh.handleError(w, req, code, err)
 		return
 	}

@@ -188,10 +177,6 @@ func (dh *DefaultHandler) HandleJobActionReq(w http.ResponseWriter, req *http.Re

 // HandleCheckStatusReq is implementation of method defined in interface 'Handler'
 func (dh *DefaultHandler) HandleCheckStatusReq(w http.ResponseWriter, req *http.Request) {
-	if !dh.preCheck(w, req) {
-		return
-	}
-
 	stats, err := dh.controller.CheckStatus()
 	if err != nil {
 		dh.handleError(w, req, http.StatusInternalServerError, errs.CheckStatsError(err))

@@ -203,34 +188,74 @@ func (dh *DefaultHandler) HandleCheckStatusReq(w http.ResponseWriter, req *http.

 // HandleJobLogReq is implementation of method defined in interface 'Handler'
 func (dh *DefaultHandler) HandleJobLogReq(w http.ResponseWriter, req *http.Request) {
-	if !dh.preCheck(w, req) {
-		return
-	}
-
 	vars := mux.Vars(req)
 	jobID := vars["job_id"]

 	if strings.Contains(jobID, "..") || strings.ContainsRune(jobID, os.PathSeparator) {
-		dh.handleError(w, req, http.StatusBadRequest, fmt.Errorf("Invalid Job ID: %s", jobID))
+		dh.handleError(w, req, http.StatusBadRequest, errors.Errorf("invalid Job ID: %s", jobID))
 		return
 	}

 	logData, err := dh.controller.GetJobLogData(jobID)
 	if err != nil {
 		code := http.StatusInternalServerError
-		backErr := errs.GetJobLogError(err)
 		if errs.IsObjectNotFoundError(err) {
 			code = http.StatusNotFound
-			backErr = err
+		} else if errs.IsBadRequestError(err) {
+			code = http.StatusBadRequest
+		} else {
+			err = errs.GetJobLogError(err)
 		}
-		dh.handleError(w, req, code, backErr)
+		dh.handleError(w, req, code, err)
 		return
 	}

 	dh.log(req, http.StatusOK, "")

 	w.WriteHeader(http.StatusOK)
-	w.Write(logData)
+	writeDate(w, logData)
 }

+// HandlePeriodicExecutions is implementation of method defined in interface 'Handler'
+func (dh *DefaultHandler) HandlePeriodicExecutions(w http.ResponseWriter, req *http.Request) {
+	// Get param
+	vars := mux.Vars(req)
+	jobID := vars["job_id"]
+
+	// Get query params
+	q := extractQuery(req)
+
+	executions, total, err := dh.controller.GetPeriodicExecutions(jobID, q)
+	if err != nil {
+		code := http.StatusInternalServerError
+		if errs.IsObjectNotFoundError(err) {
+			code = http.StatusNotFound
+		} else if errs.IsBadRequestError(err) {
+			code = http.StatusBadRequest
+		} else {
+			err = errs.GetPeriodicExecutionError(err)
+		}
+		dh.handleError(w, req, code, err)
+		return
+	}
+
+	w.Header().Add(totalHeaderKey, fmt.Sprintf("%d", total))
+	dh.handleJSONData(w, req, http.StatusOK, executions)
+
+}
+
+// HandleScheduledJobs is implementation of method defined in interface 'Handler'
+func (dh *DefaultHandler) HandleScheduledJobs(w http.ResponseWriter, req *http.Request) {
+	// Get query parameters
+	q := extractQuery(req)
+	jobs, total, err := dh.controller.ScheduledJobs(q)
+	if err != nil {
+		dh.handleError(w, req, http.StatusInternalServerError, errs.GetScheduledJobsError(err))
+		return
+	}
+
+	w.Header().Add(totalHeaderKey, fmt.Sprintf("%d", total))
+	dh.handleJSONData(w, req, http.StatusOK, jobs)
+}

 func (dh *DefaultHandler) handleJSONData(w http.ResponseWriter, req *http.Request, code int, object interface{}) {

@@ -245,7 +270,7 @@ func (dh *DefaultHandler) handleJSONData(w http.ResponseWriter, req *http.Reques
 	w.Header().Set(http.CanonicalHeaderKey("Accept"), "application/json")
 	w.Header().Set(http.CanonicalHeaderKey("content-type"), "application/json")
 	w.WriteHeader(code)
-	w.Write(data)
+	writeDate(w, data)
 }

 func (dh *DefaultHandler) handleError(w http.ResponseWriter, req *http.Request, code int, err error) {

@@ -253,18 +278,54 @@ func (dh *DefaultHandler) handleError(w http.ResponseWriter, req *http.Request,
 	logger.Errorf("Serve http request '%s %s' error: %d %s", req.Method, req.URL.String(), code, err.Error())

 	w.WriteHeader(code)
-	w.Write([]byte(err.Error()))
-}
-
-func (dh *DefaultHandler) preCheck(w http.ResponseWriter, req *http.Request) bool {
-	if dh.controller == nil {
-		dh.handleError(w, req, http.StatusInternalServerError, errs.MissingBackendHandlerError(fmt.Errorf("nil controller")))
-		return false
-	}
-
-	return true
+	writeDate(w, []byte(err.Error()))
 }

 func (dh *DefaultHandler) log(req *http.Request, code int, text string) {
 	logger.Debugf("Serve http request '%s %s': %d %s", req.Method, req.URL.String(), code, text)
 }

+func extractQuery(req *http.Request) *query.Parameter {
+	q := &query.Parameter{
+		PageNumber: 1,
+		PageSize:   query.DefaultPageSize,
+		Extras:     make(query.ExtraParameters),
+	}
+
+	queries := req.URL.Query()
+	// Page number
+	p := queries.Get(query.ParamKeyPage)
+	if !utils.IsEmptyStr(p) {
+		if pv, err := strconv.ParseUint(p, 10, 32); err == nil {
+			if pv > 1 {
+				q.PageNumber = uint(pv)
+			}
+		}
+	}
+
+	// Page size
+	size := queries.Get(query.ParamKeyPageSize)
+	if !utils.IsEmptyStr(size) {
+		if pz, err := strconv.ParseUint(size, 10, 32); err == nil {
+			if pz > 0 {
+				q.PageSize = uint(pz)
+			}
+		}
+	}
+
+	// Extra query parameters
+	nonStoppedOnly := queries.Get(query.ParamKeyNonStoppedOnly)
+	if !utils.IsEmptyStr(nonStoppedOnly) {
+		if nonStoppedOnlyV, err := strconv.ParseBool(nonStoppedOnly); err == nil {
+			q.Extras.Set(query.ExtraParamKeyNonStoppedOnly, nonStoppedOnlyV)
+		}
+	}
+
+	return q
+}
+
+func writeDate(w http.ResponseWriter, bytes []byte) {
+	if _, err := w.Write(bytes); err != nil {
+		logger.Errorf("writer write error: %s", err)
+	}
+}
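extractQuery above folds page_number, page_size, and the non_dead_only flag into a query.Parameter, keeping safe defaults when a value is missing or unparsable. A rough standalone equivalent, with the types simplified from Harbor's query package:

package main

import (
	"fmt"
	"net/url"
	"strconv"
)

type parameter struct {
	PageNumber uint
	PageSize   uint
	NonStopped bool
}

// parseQuery mimics extractQuery: set defaults first, then override only on
// successfully parsed, in-range values.
func parseQuery(raw string) parameter {
	q := parameter{PageNumber: 1, PageSize: 25}
	values, _ := url.ParseQuery(raw)
	if p, err := strconv.ParseUint(values.Get("page_number"), 10, 32); err == nil && p > 1 {
		q.PageNumber = uint(p)
	}
	if s, err := strconv.ParseUint(values.Get("page_size"), 10, 32); err == nil && s > 0 {
		q.PageSize = uint(s)
	}
	if b, err := strconv.ParseBool(values.Get("non_dead_only")); err == nil {
		q.NonStopped = b
	}
	return q
}

func main() {
	fmt.Printf("%+v\n", parseQuery("page_number=2&page_size=50&non_dead_only=true"))
	fmt.Printf("%+v\n", parseQuery("page_number=oops")) // falls back to defaults
}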
@@ -18,298 +18,495 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"github.com/goharbor/harbor/src/jobservice/common/query"
+	"github.com/goharbor/harbor/src/jobservice/errs"
+	"github.com/goharbor/harbor/src/jobservice/job"
+	"github.com/goharbor/harbor/src/jobservice/worker"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
 	"io/ioutil"
 	"math/rand"
 	"net/http"
 	"os"
 	"strings"
-	"sync"
 	"testing"
 	"time"
-
-	"github.com/goharbor/harbor/src/jobservice/env"
-	"github.com/goharbor/harbor/src/jobservice/models"
 )

-const fakeSecret = "I'mfakesecret"
-
-var testingAuthProvider = &SecretAuthenticator{}
-var testingHandler = NewDefaultHandler(&fakeController{})
-var testingRouter = NewBaseRouter(testingHandler, testingAuthProvider)
-var client = &http.Client{
-	Timeout: 10 * time.Second,
-	Transport: &http.Transport{
-		MaxIdleConns:    20,
-		IdleConnTimeout: 30 * time.Second,
-	},
-}
+const (
+	secretKey  = "CORE_SECRET"
+	fakeSecret = "I'mfakesecret"
+)
+
+// APIHandlerTestSuite tests functions of API handler
+type APIHandlerTestSuite struct {
+	suite.Suite
+
+	server     *Server
+	controller *fakeController
+	APIAddr    string
+	client     *http.Client
+	cancel     context.CancelFunc
+}

-func TestUnAuthorizedAccess(t *testing.T) {
-	exportUISecret("hello")
-
-	server, port, ctx := createServer()
-	server.Start()
-	<-time.After(200 * time.Millisecond)
-
-	res, err := getReq(fmt.Sprintf("http://localhost:%d/api/v1/jobs/fake_job", port))
-	if e := expectFormatedError(res, err); e != nil {
-		t.Fatal(e)
-	}
-	if strings.Index(err.Error(), "401") == -1 {
-		t.Fatalf("expect '401' but got none 401 error")
-	}
-
-	server.Stop()
-	ctx.WG.Wait()
-}
+// SetupSuite prepares test suite
+func (suite *APIHandlerTestSuite) SetupSuite() {
+	_ = os.Setenv(secretKey, fakeSecret)
+
+	suite.client = &http.Client{
+		Timeout: 10 * time.Second,
+		Transport: &http.Transport{
+			MaxIdleConns:    20,
+			IdleConnTimeout: 30 * time.Second,
+		},
+	}
+
+	suite.createServer()
+
+	go func() {
+		_ = suite.server.Start()
+	}()
+
+	<-time.After(200 * time.Millisecond)
+}

-func TestLaunchJobFailed(t *testing.T) {
-	exportUISecret(fakeSecret)
-
-	server, port, ctx := createServer()
-	server.Start()
-	<-time.After(200 * time.Millisecond)
-
-	resData, err := postReq(fmt.Sprintf("http://localhost:%d/api/v1/jobs", port), createJobReq(false))
-	if e := expectFormatedError(resData, err); e != nil {
-		t.Error(e)
-	}
-
-	server.Stop()
-	ctx.WG.Wait()
-}
+// TearDownSuite clears test suite
+func (suite *APIHandlerTestSuite) TearDownSuite() {
+	_ = os.Unsetenv(secretKey)
+	_ = suite.server.Stop()
+	suite.cancel()
+}

-func TestLaunchJobSucceed(t *testing.T) {
-	exportUISecret(fakeSecret)
-
-	server, port, ctx := createServer()
-	server.Start()
-	<-time.After(200 * time.Millisecond)
-
-	res, err := postReq(fmt.Sprintf("http://localhost:%d/api/v1/jobs", port), createJobReq(true))
-	if err != nil {
-		t.Fatal(err)
-	}
-	obj, err := getResult(res)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if obj.Stats.JobID != "fake_ID_ok" {
-		t.Fatalf("expect job ID 'fake_ID_ok' but got '%s'\n", obj.Stats.JobID)
-	}
-
-	server.Stop()
-	ctx.WG.Wait()
-}
+// TestAPIHandlerTestSuite is suite entry for 'go test'
+func TestAPIHandlerTestSuite(t *testing.T) {
+	suite.Run(t, new(APIHandlerTestSuite))
+}

-func TestGetJobFailed(t *testing.T) {
-	exportUISecret(fakeSecret)
-
-	server, port, ctx := createServer()
-	server.Start()
-	<-time.After(200 * time.Millisecond)
-
-	res, err := getReq(fmt.Sprintf("http://localhost:%d/api/v1/jobs/fake_job", port))
-	if e := expectFormatedError(res, err); e != nil {
-		t.Fatal(e)
-	}
-
-	server.Stop()
-	ctx.WG.Wait()
-}
+// TestUnAuthorizedAccess ...
+func (suite *APIHandlerTestSuite) TestUnAuthorizedAccess() {
+	_ = os.Unsetenv(secretKey)
+	defer func() {
+		_ = os.Setenv(secretKey, fakeSecret)
+	}()
+
+	_, code := suite.getReq(fmt.Sprintf("%s/%s", suite.APIAddr, "jobs/fake_job"))
+	assert.Equal(suite.T(), 401, code, "expect '401' but got none 401 error")
+}

-func TestGetJobSucceed(t *testing.T) {
-	exportUISecret(fakeSecret)
-
-	server, port, ctx := createServer()
-	server.Start()
-	<-time.After(200 * time.Millisecond)
-
-	res, err := getReq(fmt.Sprintf("http://localhost:%d/api/v1/jobs/fake_job_ok", port))
-	if err != nil {
-		t.Fatal(err)
-	}
-	obj, err := getResult(res)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if obj.Stats.JobName != "testing" || obj.Stats.JobID != "fake_ID_ok" {
-		t.Fatalf("expect job ID 'fake_ID_ok' of 'testing', but got '%s'\n", obj.Stats.JobID)
-	}
-
-	server.Stop()
-	ctx.WG.Wait()
-}
+// TestLaunchJobFailed ...
+func (suite *APIHandlerTestSuite) TestLaunchJobFailed() {
+	req := createJobReq()
+	bytes, _ := json.Marshal(req)
+
+	fc1 := &fakeController{}
+	fc1.On("LaunchJob", req).Return(nil, errs.BadRequestError(req.Job.Name))
+	suite.controller = fc1
+	_, code := suite.postReq(fmt.Sprintf("%s/%s", suite.APIAddr, "jobs"), bytes)
+	assert.Equal(suite.T(), 400, code, "expect 400 bad request but got %d", code)
+
+	fc2 := &fakeController{}
+	fc2.On("LaunchJob", req).Return(nil, errs.ConflictError(req.Job.Name))
+	suite.controller = fc2
+	_, code = suite.postReq(fmt.Sprintf("%s/%s", suite.APIAddr, "jobs"), bytes)
+	assert.Equal(suite.T(), 409, code, "expect 409 conflict but got %d", code)
+
+	fc3 := &fakeController{}
+	fc3.On("LaunchJob", req).Return(nil, errs.LaunchJobError(errors.New("testing launch job")))
+	suite.controller = fc3
+	_, code = suite.postReq(fmt.Sprintf("%s/%s", suite.APIAddr, "jobs"), bytes)
+	assert.Equal(suite.T(), 500, code, "expect 500 internal server error but got %d", code)
+}

-func TestJobActionFailed(t *testing.T) {
-	exportUISecret(fakeSecret)
-
-	server, port, ctx := createServer()
-	server.Start()
-	<-time.After(200 * time.Millisecond)
-
-	actionReq, err := createJobActionReq("stop")
-	if err != nil {
-		t.Fatal(err)
-	}
-	resData, err := postReq(fmt.Sprintf("http://localhost:%d/api/v1/jobs/fake_job", port), actionReq)
-	expectFormatedError(resData, err)
-
-	actionReq, err = createJobActionReq("cancel")
-	if err != nil {
-		t.Fatal(err)
-	}
-	resData, err = postReq(fmt.Sprintf("http://localhost:%d/api/v1/jobs/fake_job", port), actionReq)
-	expectFormatedError(resData, err)
-
-	actionReq, err = createJobActionReq("retry")
-	if err != nil {
-		t.Fatal(err)
-	}
-	resData, err = postReq(fmt.Sprintf("http://localhost:%d/api/v1/jobs/fake_job", port), actionReq)
-	expectFormatedError(resData, err)
-
-	server.Stop()
-	ctx.WG.Wait()
-}
+// TestLaunchJobSucceed ...
+func (suite *APIHandlerTestSuite) TestLaunchJobSucceed() {
+	req := createJobReq()
+	bytes, _ := json.Marshal(req)
+
+	fc := &fakeController{}
+	fc.On("LaunchJob", req).Return(createJobStats("sample", "Generic", ""), nil)
+	suite.controller = fc
+
+	_, code := suite.postReq(fmt.Sprintf("%s/%s", suite.APIAddr, "jobs"), bytes)
+	assert.Equal(suite.T(), 202, code, "expected 202 created but got %d when launching job", code)
+}

-func TestJobActionSucceed(t *testing.T) {
-	exportUISecret(fakeSecret)
-
-	server, port, ctx := createServer()
-	server.Start()
-	<-time.After(200 * time.Millisecond)
-
-	actionReq, err := createJobActionReq("stop")
-	if err != nil {
-		t.Fatal(err)
-	}
-	_, err = postReq(fmt.Sprintf("http://localhost:%d/api/v1/jobs/fake_job_ok", port), actionReq)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	actionReq, err = createJobActionReq("cancel")
-	if err != nil {
-		t.Fatal(err)
-	}
-	_, err = postReq(fmt.Sprintf("http://localhost:%d/api/v1/jobs/fake_job_ok", port), actionReq)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	actionReq, err = createJobActionReq("retry")
-	if err != nil {
-		t.Fatal(err)
-	}
-	_, err = postReq(fmt.Sprintf("http://localhost:%d/api/v1/jobs/fake_job_ok", port), actionReq)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	server.Stop()
-	ctx.WG.Wait()
-}
+// TestGetJobFailed ...
+func (suite *APIHandlerTestSuite) TestGetJobFailed() {
+	fc := &fakeController{}
+	fc.On("GetJob", "fake_job_ID").Return(nil, errs.NoObjectFoundError("fake_job_ID"))
+	suite.controller = fc
+
+	_, code := suite.getReq(fmt.Sprintf("%s/%s", suite.APIAddr, "jobs/fake_job_ID"))
+	assert.Equal(suite.T(), 404, code, "expected 404 not found but got %d when getting job", code)
+}
+
+// TestGetJobSucceed ...
+func (suite *APIHandlerTestSuite) TestGetJobSucceed() {
+	fc := &fakeController{}
+	fc.On("GetJob", "fake_job_ID").Return(createJobStats("sample", "Generic", ""), nil)
+	suite.controller = fc
+
+	res, code := suite.getReq(fmt.Sprintf("%s/%s", suite.APIAddr, "jobs/fake_job_ID"))
+	require.Equal(suite.T(), 200, code, "expected 200 ok but got %d when getting job", code)
+	stats, err := getResult(res)
+	require.Nil(suite.T(), err, "no error should be occurred when unmarshal job stats")
+	assert.Equal(suite.T(), "fake_job_ID", stats.Info.JobID, "expected job ID 'fake_job_ID' but got %s", stats.Info.JobID)
+}
+
+// TestJobActionFailed ...
+func (suite *APIHandlerTestSuite) TestJobActionFailed() {
+	actionReq := createJobActionReq("not-support")
+	data, _ := json.Marshal(actionReq)
+	_, code := suite.postReq(fmt.Sprintf("%s/%s", suite.APIAddr, "jobs/fake_job_ID"), data)
+	assert.Equal(suite.T(), 501, code, "expected 501 not implemented but got %d", code)
+
+	fc1 := &fakeController{}
+	fc1.On("StopJob", "fake_job_ID_not").Return(errs.NoObjectFoundError("fake_job_ID_not"))
+	suite.controller = fc1
+	actionReq = createJobActionReq("stop")
+	data, _ = json.Marshal(actionReq)
+	_, code = suite.postReq(fmt.Sprintf("%s/%s", suite.APIAddr, "jobs/fake_job_ID_not"), data)
+	assert.Equal(suite.T(), 404, code, "expected 404 not found but got %d", code)
+
+	fc2 := &fakeController{}
+	fc2.On("StopJob", "fake_job_ID").Return(errs.BadRequestError("fake_job_ID"))
+	suite.controller = fc2
+	_, code = suite.postReq(fmt.Sprintf("%s/%s", suite.APIAddr, "jobs/fake_job_ID"), data)
+	assert.Equal(suite.T(), 400, code, "expected 400 bad request but got %d", code)
+
+	fc3 := &fakeController{}
+	fc3.On("StopJob", "fake_job_ID").Return(errs.StopJobError(errors.New("testing error")))
+	suite.controller = fc3
+	_, code = suite.postReq(fmt.Sprintf("%s/%s", suite.APIAddr, "jobs/fake_job_ID"), data)
+	assert.Equal(suite.T(), 500, code, "expected 500 internal server but got %d", code)
+}
+
+// TestJobActionSucceed ...
+func (suite *APIHandlerTestSuite) TestJobActionSucceed() {
+	fc := &fakeController{}
+	fc.On("StopJob", "fake_job_ID_not").Return(nil)
+	suite.controller = fc
+	actionReq := createJobActionReq("stop")
+	data, _ := json.Marshal(actionReq)
+	_, code := suite.postReq(fmt.Sprintf("%s/%s", suite.APIAddr, "jobs/fake_job_ID_not"), data)
+	assert.Equal(suite.T(), 204, code, "expected 204 no content but got %d", code)
+}

-func TestCheckStatus(t *testing.T) {
-	exportUISecret(fakeSecret)
-
-	server, port, ctx := createServer()
-	server.Start()
-	<-time.After(200 * time.Millisecond)
-
-	resData, err := getReq(fmt.Sprintf("http://localhost:%d/api/v1/stats", port))
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	poolStats := &models.JobPoolStats{
-		Pools: make([]*models.JobPoolStatsData, 0),
-	}
-	err = json.Unmarshal(resData, poolStats)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if poolStats.Pools[0].WorkerPoolID != "fake_pool_ID" {
-		t.Fatalf("expect pool ID 'fake_pool_ID' but got '%s'", poolStats.Pools[0].WorkerPoolID)
-	}
-
-	server.Stop()
-	ctx.WG.Wait()
-}
+// TestCheckStatus ...
+func (suite *APIHandlerTestSuite) TestCheckStatus() {
+	statsRes := &worker.Stats{
+		Pools: []*worker.StatsData{
+			{
+				WorkerPoolID: "my-worker-pool-ID",
+			},
+		},
+	}
+	fc := &fakeController{}
+	fc.On("CheckStatus").Return(statsRes, nil)
+	suite.controller = fc
+
+	bytes, code := suite.getReq(fmt.Sprintf("%s/%s", suite.APIAddr, "stats"))
+	require.Equal(suite.T(), 200, code, "expected 200 ok when getting worker stats but got %d", code)
+
+	poolStats := &worker.Stats{
+		Pools: make([]*worker.StatsData, 0),
+	}
+	err := json.Unmarshal(bytes, poolStats)
+	assert.Nil(suite.T(), err, "no error should be occurred when unmarshal worker stats")
+	assert.Equal(suite.T(), 1, len(poolStats.Pools), "at least 1 pool exists but got %d", len(poolStats.Pools))
+	assert.Equal(suite.T(), "my-worker-pool-ID", poolStats.Pools[0].WorkerPoolID, "expected pool ID 'my-worker-pool-ID' but got %s", poolStats.Pools[0].WorkerPoolID)
+}

-func TestGetJobLogInvalidID(t *testing.T) {
-	exportUISecret(fakeSecret)
-
-	server, port, ctx := createServer()
-	server.Start()
-	<-time.After(200 * time.Millisecond)
-
-	_, err := getReq(fmt.Sprintf("http://localhost:%d/api/v1/jobs/%%2F..%%2Fpasswd/log", port))
-	if err == nil || !strings.Contains(err.Error(), "400") {
-		t.Fatalf("Expected 400 error but got: %v", err)
-	}
-
-	server.Stop()
-	ctx.WG.Wait()
-}
+// TestGetJobLogInvalidID ...
+func (suite *APIHandlerTestSuite) TestGetJobLogInvalidID() {
+	fc := &fakeController{}
+	fc.On("GetJobLogData", "fake_job_ID_not").Return(nil, errs.NoObjectFoundError("fake_job_ID_not"))
+	suite.controller = fc
+
+	_, code := suite.getReq(fmt.Sprintf("%s/%s", suite.APIAddr, "jobs/fake_job_ID_not/log"))
+	assert.Equal(suite.T(), 404, code, "expected 404 not found but got %d", code)
+}

-func TestGetJobLog(t *testing.T) {
-	exportUISecret(fakeSecret)
-
-	server, port, ctx := createServer()
-	server.Start()
-	<-time.After(200 * time.Millisecond)
-
-	resData, err := getReq(fmt.Sprintf("http://localhost:%d/api/v1/jobs/fake_job_ok/log", port))
-	if err != nil {
-		t.Fatal(err)
-	}
-	if len(resData) == 0 {
-		t.Fatal("expect job log but got nothing")
-	}
-
-	server.Stop()
-	ctx.WG.Wait()
-}
+// TestGetJobLog ...
+func (suite *APIHandlerTestSuite) TestGetJobLog() {
+	fc := &fakeController{}
+	fc.On("GetJobLogData", "fake_job_ID").Return([]byte("hello log"), nil)
+	suite.controller = fc
+
+	resData, code := suite.getReq(fmt.Sprintf("%s/%s", suite.APIAddr, "jobs/fake_job_ID/log"))
+	require.Equal(suite.T(), 200, code, "expected 200 ok but got %d", code)
+	assert.Equal(suite.T(), "hello log", string(resData))
+}
+
+// TestGetPeriodicExecutionsWithoutQuery ...
+func (suite *APIHandlerTestSuite) TestGetPeriodicExecutionsWithoutQuery() {
+	q := &query.Parameter{
+		PageNumber: 1,
+		PageSize:   query.DefaultPageSize,
+		Extras:     make(query.ExtraParameters),
+	}
+
+	fc := &fakeController{}
+	fc.On("GetPeriodicExecutions", "fake_job_ID", q).
+		Return([]*job.Stats{createJobStats("sample", "Generic", "")}, int64(1), nil)
+	suite.controller = fc
+
+	_, code := suite.getReq(fmt.Sprintf("%s/%s", suite.APIAddr, "jobs/fake_job_ID/executions"))
+	assert.Equal(suite.T(), 200, code, "expected 200 ok but got %d", code)
+}
+
+// TestGetPeriodicExecutionsWithQuery ...
+func (suite *APIHandlerTestSuite) TestGetPeriodicExecutionsWithQuery() {
+	extras := make(query.ExtraParameters)
+	extras.Set(query.ExtraParamKeyNonStoppedOnly, true)
+	q := &query.Parameter{
+		PageNumber: 2,
+		PageSize:   50,
+		Extras:     extras,
+	}
+
+	fc := &fakeController{}
+	fc.On("GetPeriodicExecutions", "fake_job_ID", q).
+		Return([]*job.Stats{createJobStats("sample", "Generic", "")}, int64(1), nil)
+	suite.controller = fc
+
+	_, code := suite.getReq(fmt.Sprintf("%s/%s", suite.APIAddr, "jobs/fake_job_ID/executions?page_number=2&page_size=50&non_dead_only=true"))
+	assert.Equal(suite.T(), 200, code, "expected 200 ok but got %d", code)
+}
+
+// TestScheduledJobs ...
+func (suite *APIHandlerTestSuite) TestScheduledJobs() {
+	q := &query.Parameter{
+		PageNumber: 2,
+		PageSize:   50,
+		Extras:     make(query.ExtraParameters),
+	}
+
+	fc := &fakeController{}
+	fc.On("ScheduledJobs", q).
+		Return([]*job.Stats{createJobStats("sample", "Generic", "")}, int64(1), nil)
+	suite.controller = fc
+
+	_, code := suite.getReq(fmt.Sprintf("%s/%s", suite.APIAddr, "jobs/scheduled?page_number=2&page_size=50"))
+	assert.Equal(suite.T(), 200, code, "expected 200 ok but got %d", code)
+}
+
+// createServer ...
+func (suite *APIHandlerTestSuite) createServer() {
+	port := uint(30000 + rand.Intn(1000))
+	suite.APIAddr = fmt.Sprintf("http://localhost:%d/api/v1", port)
+
+	config := ServerConfig{
+		Protocol: "http",
+		Port:     port,
+	}
+	ctx, cancel := context.WithCancel(context.Background())
+
+	testingRouter := NewBaseRouter(
+		NewDefaultHandler(suite),
+		&SecretAuthenticator{},
+	)
+	suite.server = NewServer(ctx, testingRouter, config)
+	suite.cancel = cancel
+}
+
+// postReq ...
+func (suite *APIHandlerTestSuite) postReq(url string, data []byte) ([]byte, int) {
+	req, err := http.NewRequest(http.MethodPost, url, strings.NewReader(string(data)))
+	if err != nil {
+		return nil, 0
+	}
+
+	req.Header.Set(authHeader, fmt.Sprintf("%s %s", secretPrefix, fakeSecret))
+
+	res, err := suite.client.Do(req)
+	if err != nil {
+		return nil, 0
+	}
+
+	var (
+		resData []byte
+	)
+
+	defer func() {
+		_ = res.Body.Close()
+	}()
+	if res.ContentLength > 0 {
+		resData, err = ioutil.ReadAll(res.Body)
+		if err != nil {
+			return nil, 0
+		}
+	}
+
+	return resData, res.StatusCode
+}
+
+// getReq ...
+func (suite *APIHandlerTestSuite) getReq(url string) ([]byte, int) {
+	req, err := http.NewRequest(http.MethodGet, url, nil)
+	if err != nil {
+		return nil, 0
+	}
+
+	req.Header.Set(authHeader, fmt.Sprintf("%s %s", secretPrefix, fakeSecret))
+
+	res, err := suite.client.Do(req)
+	if err != nil {
+		return nil, 0
+	}
+
+	defer func() {
+		_ = res.Body.Close()
+	}()
+
+	data, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return nil, 0
+	}
+
+	return data, res.StatusCode
+}
+
+func (suite *APIHandlerTestSuite) LaunchJob(req *job.Request) (*job.Stats, error) {
+	return suite.controller.LaunchJob(req)
+}
+
+func (suite *APIHandlerTestSuite) GetJob(jobID string) (*job.Stats, error) {
+	return suite.controller.GetJob(jobID)
+}
+
+func (suite *APIHandlerTestSuite) StopJob(jobID string) error {
+	return suite.controller.StopJob(jobID)
+}
+
+func (suite *APIHandlerTestSuite) RetryJob(jobID string) error {
+	return suite.controller.RetryJob(jobID)
+}
+
+func (suite *APIHandlerTestSuite) CheckStatus() (*worker.Stats, error) {
+	return suite.controller.CheckStatus()
+}
+
+func (suite *APIHandlerTestSuite) GetJobLogData(jobID string) ([]byte, error) {
+	return suite.controller.GetJobLogData(jobID)
+}
+
+func (suite *APIHandlerTestSuite) GetPeriodicExecutions(periodicJobID string, query *query.Parameter) ([]*job.Stats, int64, error) {
+	return suite.controller.GetPeriodicExecutions(periodicJobID, query)
+}
+
+func (suite *APIHandlerTestSuite) ScheduledJobs(query *query.Parameter) ([]*job.Stats, int64, error) {
+	return suite.controller.ScheduledJobs(query)
+}
+
+type fakeController struct {
+	mock.Mock
+}
+
+func (fc *fakeController) LaunchJob(req *job.Request) (*job.Stats, error) {
+	args := fc.Called(req)
+	if args.Error(1) != nil {
+		return nil, args.Error(1)
+	}
+
+	return args.Get(0).(*job.Stats), nil
+}
+
+func (fc *fakeController) GetJob(jobID string) (*job.Stats, error) {
+	args := fc.Called(jobID)
+	if args.Error(1) != nil {
+		return nil, args.Error(1)
+	}
+
+	return args.Get(0).(*job.Stats), nil
+}
+
+func (fc *fakeController) StopJob(jobID string) error {
+	args := fc.Called(jobID)
+	return args.Error(0)
+}
+
+func (fc *fakeController) RetryJob(jobID string) error {
+	args := fc.Called(jobID)
+	return args.Error(0)
+}
+
+func (fc *fakeController) CheckStatus() (*worker.Stats, error) {
+	args := fc.Called()
+	if args.Error(1) != nil {
+		return nil, args.Error(1)
+	}
+
+	return args.Get(0).(*worker.Stats), nil
+}
+
+func (fc *fakeController) GetJobLogData(jobID string) ([]byte, error) {
+	args := fc.Called(jobID)
+	if args.Error(1) != nil {
+		return nil, args.Error(1)
+	}
+
+	return args.Get(0).([]byte), nil
+}
+
+func (fc *fakeController) GetPeriodicExecutions(periodicJobID string, query *query.Parameter) ([]*job.Stats, int64, error) {
+	args := fc.Called(periodicJobID, query)
+	if args.Error(2) != nil {
+		return nil, args.Get(1).(int64), args.Error(2)
+	}
+
+	return args.Get(0).([]*job.Stats), args.Get(1).(int64), nil
+}
+
+func (fc *fakeController) ScheduledJobs(query *query.Parameter) ([]*job.Stats, int64, error) {
+	args := fc.Called(query)
+	if args.Error(2) != nil {
+		return nil, args.Get(1).(int64), args.Error(2)
+	}
+
+	return args.Get(0).([]*job.Stats), args.Get(1).(int64), nil
+}
+
+func createJobStats(name, kind, cron string) *job.Stats {
+	now := time.Now()
+	params := make(job.Parameters)
+	params["image"] = "testing:v1"
+
+	return &job.Stats{
+		Info: &job.StatsInfo{
+			JobID:       "fake_job_ID",
+			Status:      job.PendingStatus.String(),
+			JobName:     name,
+			JobKind:     kind,
+			IsUnique:    false,
+			RefLink:     "/api/v1/jobs/fake_job_ID",
+			CronSpec:    cron,
+			RunAt:       now.Add(100 * time.Second).Unix(),
+			EnqueueTime: now.Unix(),
+			UpdateTime:  now.Unix(),
+			Parameters:  params,
+		},
+	}
+}
+
+func getResult(res []byte) (*job.Stats, error) {
+	obj := &job.Stats{}
+	err := json.Unmarshal(res, obj)
+
+	return obj, err
+}

-func createJobReq(ok bool) []byte {
-	params := make(map[string]interface{})
-	params["image"] = "testing:v1"
-	name := "fake_job_ok"
-	if !ok {
-		name = "fake_job_error"
-	}
-	req := &models.JobRequest{
-		Job: &models.JobData{
-			Name:       name,
+func createJobReq() *job.Request {
+	params := make(job.Parameters)
+	params["image"] = "testing:v1"
+
+	return &job.Request{
+		Job: &job.RequestBody{
+			Name:       "my-testing-job",
 			Parameters: params,
-			Metadata: &models.JobMetadata{
+			Metadata: &job.Metadata{
 				JobKind:  "Periodic",
 				Cron:     "5 * * * * *",
 				IsUnique: true,

@@ -317,178 +514,10 @@ func createJobReq(ok bool) []byte {
 			StatusHook: "http://localhost:39999",
 		},
 	}
-
-	data, _ := json.Marshal(req)
-	return data
-}

-func createJobActionReq(action string) ([]byte, error) {
-	actionReq := models.JobActionRequest{
+func createJobActionReq(action string) *job.ActionRequest {
+	return &job.ActionRequest{
 		Action: action,
 	}
-
-	return json.Marshal(&actionReq)
-}
-
-func expectFormatedError(data []byte, err error) error {
-	if err == nil {
-		return errors.New("expect error but got nil")
-	}
-
-	if err != nil && len(data) <= 0 {
-		return errors.New("expect error but got nothing")
-	}
-
-	if err != nil && len(data) > 0 {
-		var m = make(map[string]interface{})
-		if err := json.Unmarshal(data, &m); err != nil {
-			return err
-		}
-
-		if _, ok := m["code"]; !ok {
-			return errors.New("malformated error")
-		}
-	}
-
-	return nil
-}
-
-func postReq(url string, data []byte) ([]byte, error) {
-	req, err := http.NewRequest(http.MethodPost, url, strings.NewReader(string(data)))
-	if err != nil {
-		return nil, err
-	}
-
-	req.Header.Set(authHeader, fmt.Sprintf("%s %s", secretPrefix, fakeSecret))
-
-	res, err := client.Do(req)
-	if err != nil {
-		return nil, err
-	}
-
-	var (
-		resData []byte
-	)
-
-	defer res.Body.Close()
-	if res.ContentLength > 0 {
-		resData, err = ioutil.ReadAll(res.Body)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	if res.StatusCode >= http.StatusOK && res.StatusCode <= http.StatusNoContent {
-		return resData, nil
-	}
-
-	return resData, fmt.Errorf("expect status code '200,201,202,204', but got '%d'", res.StatusCode)
-}
-
-func getReq(url string) ([]byte, error) {
-	req, err := http.NewRequest(http.MethodGet, url, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	req.Header.Set(authHeader, fmt.Sprintf("%s %s", secretPrefix, fakeSecret))
-
-	res, err := client.Do(req)
-	if err != nil {
-		return nil, err
-	}
-
-	defer res.Body.Close()
-	data, err := ioutil.ReadAll(res.Body)
-	if err != nil {
-		return nil, err
-	}
-
-	if res.StatusCode != http.StatusOK {
-		return data, fmt.Errorf("expect status code '200', but got '%d'", res.StatusCode)
-	}
-
-	return data, nil
-}
-
-func exportUISecret(secret string) {
-	os.Setenv("CORE_SECRET", secret)
-}
-
-type fakeController struct{}
-
-func (fc *fakeController) LaunchJob(req models.JobRequest) (models.JobStats, error) {
-	if req.Job.Name != "fake_job_ok" || req.Job.Metadata == nil {
-		return models.JobStats{}, errors.New("failed")
-	}
-
-	return createJobStats(req.Job.Name, req.Job.Metadata.JobKind, req.Job.Metadata.Cron), nil
-}
-
-func (fc *fakeController) GetJob(jobID string) (models.JobStats, error) {
-	if jobID != "fake_job_ok" {
-		return models.JobStats{}, errors.New("failed")
-	}
-
-	return createJobStats("testing", "Generic", ""), nil
-}
-
-func (fc *fakeController) StopJob(jobID string) error {
-	if jobID == "fake_job_ok" {
-		return nil
-	}
-
-	return errors.New("failed")
-}
-
-func (fc *fakeController) RetryJob(jobID string) error {
-	if jobID == "fake_job_ok" {
-		return nil
-	}
-
-	return errors.New("failed")
-}
-
-func (fc *fakeController) CancelJob(jobID string) error {
-	if jobID == "fake_job_ok" {
-		return nil
-	}
-
-	return errors.New("failed")
-}
-
-func (fc *fakeController) CheckStatus() (models.JobPoolStats, error) {
-	return models.JobPoolStats{
-		Pools: []*models.JobPoolStatsData{{
-			WorkerPoolID: "fake_pool_ID",
-			Status:       "running",
-			StartedAt:    time.Now().Unix(),
-		}},
-	}, nil
-}
-
-func (fc *fakeController) GetJobLogData(jobID string) ([]byte, error) {
-	if jobID == "fake_job_ok" {
-		return []byte("job log"), nil
-	}
-
-	return nil, errors.New("failed")
-}
-
-func createJobStats(name, kind, cron string) models.JobStats {
-	now := time.Now()
-
-	return models.JobStats{
-		Stats: &models.JobStatData{
-			JobID:       "fake_ID_ok",
-			Status:      "pending",
-			JobName:     name,
-			JobKind:     kind,
-			IsUnique:    false,
-			RefLink:     "/api/v1/jobs/fake_ID_ok",
-			CronSpec:    cron,
-			RunAt:       now.Add(100 * time.Second).Unix(),
-			EnqueueTime: now.Unix(),
-			UpdateTime:  now.Unix(),
-		},
-	}
-}
-
-func getResult(res []byte) (models.JobStats, error) {
-	obj := models.JobStats{}
-	err := json.Unmarshal(res, &obj)
-
-	return obj, err
-}
-
-func createServer() (*Server, uint, *env.Context) {
-	port := uint(30000 + rand.Intn(10000))
-	config := ServerConfig{
-		Protocol: "http",
-		Port:     port,
-	}
-	ctx := &env.Context{
-		SystemContext: context.Background(),
-		WG:            new(sync.WaitGroup),
-		ErrorChan:     make(chan error, 1),
-	}
-	server := NewServer(ctx, testingRouter, config)
-	return server, port, ctx
 }
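The rewritten test wires a testify mock through the suite itself: the suite implements the controller interface and forwards each call to a swappable fakeController, so every test programs its own expectations. A compact sketch of that pattern, with the interface reduced to one method for brevity; the names here are illustrative, not Harbor's.

package sample

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/suite"
)

type ctl interface{ StopJob(id string) error }

type fakeCtl struct{ mock.Mock }

func (f *fakeCtl) StopJob(id string) error { return f.Called(id).Error(0) }

// CtlSuite forwards to a per-test controller, as APIHandlerTestSuite does.
type CtlSuite struct {
	suite.Suite
	controller ctl
}

func (s *CtlSuite) StopJob(id string) error { return s.controller.StopJob(id) }

func (s *CtlSuite) TestStop() {
	fc := &fakeCtl{}
	fc.On("StopJob", "job-1").Return(nil) // program the expectation
	s.controller = fc

	assert.Nil(s.T(), s.StopJob("job-1"))
	fc.AssertExpectations(s.T())
}

func TestCtlSuite(t *testing.T) { suite.Run(t, new(CtlSuite)) }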
@@ -16,6 +16,7 @@ package api

 import (
 	"fmt"
+	"github.com/pkg/errors"
 	"net/http"

 	"github.com/goharbor/harbor/src/jobservice/errs"

@@ -68,9 +69,12 @@ func (br *BaseRouter) ServeHTTP(w http.ResponseWriter, req *http.Request) {
 	if req.URL.String() != fmt.Sprintf("%s/%s/stats", baseRoute, apiVersion) {
 		if err := br.authenticator.DoAuth(req); err != nil {
 			authErr := errs.UnauthorizedError(err)
+			if authErr == nil {
+				authErr = errors.Errorf("unauthorized: %s", err)
+			}
 			logger.Errorf("Serve http request '%s %s' failed with error: %s", req.Method, req.URL.String(), authErr.Error())
 			w.WriteHeader(http.StatusUnauthorized)
-			w.Write([]byte(authErr.Error()))
+			writeDate(w, []byte(authErr.Error()))
 			return
 		}
 	}

@@ -84,8 +88,10 @@ func (br *BaseRouter) registerRoutes() {
 	subRouter := br.router.PathPrefix(fmt.Sprintf("%s/%s", baseRoute, apiVersion)).Subrouter()

 	subRouter.HandleFunc("/jobs", br.handler.HandleLaunchJobReq).Methods(http.MethodPost)
+	subRouter.HandleFunc("/jobs/scheduled", br.handler.HandleScheduledJobs).Methods(http.MethodGet)
 	subRouter.HandleFunc("/jobs/{job_id}", br.handler.HandleGetJobReq).Methods(http.MethodGet)
 	subRouter.HandleFunc("/jobs/{job_id}", br.handler.HandleJobActionReq).Methods(http.MethodPost)
 	subRouter.HandleFunc("/jobs/{job_id}/log", br.handler.HandleJobLogReq).Methods(http.MethodGet)
 	subRouter.HandleFunc("/stats", br.handler.HandleCheckStatusReq).Methods(http.MethodGet)
+	subRouter.HandleFunc("/jobs/{job_id}/executions", br.handler.HandlePeriodicExecutions).Methods(http.MethodGet)
 }
@@ -15,14 +15,13 @@
 package api

 import (
+	"context"
 	"crypto/tls"
 	"fmt"
 	"net/http"
 	"time"

-	"context"
 	"github.com/goharbor/harbor/src/jobservice/config"
-	"github.com/goharbor/harbor/src/jobservice/env"
 	"github.com/goharbor/harbor/src/jobservice/logger"
 )

@@ -38,7 +37,7 @@ type Server struct {
 	config ServerConfig

 	// The context
-	context *env.Context
+	context context.Context
 }

 // ServerConfig contains the configurations of Server.

@@ -57,7 +56,7 @@ type ServerConfig struct {
 }

 // NewServer is constructor of Server.
-func NewServer(ctx *env.Context, router Router, cfg ServerConfig) *Server {
+func NewServer(ctx context.Context, router Router, cfg ServerConfig) *Server {
 	apiServer := &Server{
 		router: router,
 		config: cfg,

@@ -96,39 +95,23 @@ func NewServer(ctx context.Context, router Router, cfg ServerConfig) *Server {
 }

 // Start the server to serve requests.
-func (s *Server) Start() {
-	s.context.WG.Add(1)
-
-	go func() {
-		var err error
-		defer func() {
-			s.context.WG.Done()
-			logger.Infof("API server is gracefully shutdown")
-		}()
-
-		if s.config.Protocol == config.JobServiceProtocolHTTPS {
-			err = s.httpServer.ListenAndServeTLS(s.config.Cert, s.config.Key)
-		} else {
-			err = s.httpServer.ListenAndServe()
-		}
-
-		if err != nil {
-			s.context.ErrorChan <- err
-		}
-	}()
+// Blocking call
+func (s *Server) Start() error {
+	defer func() {
+		logger.Info("API server is stopped")
+	}()
+
+	if s.config.Protocol == config.JobServiceProtocolHTTPS {
+		return s.httpServer.ListenAndServeTLS(s.config.Cert, s.config.Key)
+	}
+
+	return s.httpServer.ListenAndServe()
 }

 // Stop server gracefully.
-func (s *Server) Stop() {
-	go func() {
-		defer func() {
-			logger.Info("Stop API server done!")
-		}()
-		shutDownCtx, cancel := context.WithTimeout(s.context.SystemContext, 10*time.Second)
-		defer cancel()
-
-		if err := s.httpServer.Shutdown(shutDownCtx); err != nil {
-			logger.Errorf("Shutdown API server failed with error: %s\n", err)
-		}
-	}()
+func (s *Server) Stop() error {
+	shutDownCtx, cancel := context.WithTimeout(s.context, 15*time.Second)
+	defer cancel()
+
+	return s.httpServer.Shutdown(shutDownCtx)
 }
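With Start now a blocking call that returns an error, and Stop driving Shutdown from the stored context, the caller owns the serving goroutine. A plausible in-package usage sketch; runAPIServer is a hypothetical helper, not part of this diff.

package api

import (
	"context"

	"github.com/goharbor/harbor/src/jobservice/logger"
)

// runAPIServer shows how a caller might drive the new blocking Start/Stop pair:
// serve until the root context is cancelled, then shut down gracefully.
func runAPIServer(ctx context.Context, router Router, cfg ServerConfig) {
	server := NewServer(ctx, router, cfg)

	errChan := make(chan error, 1)
	go func() {
		// Blocking; returns http.ErrServerClosed after a graceful Stop().
		errChan <- server.Start()
	}()

	select {
	case <-ctx.Done():
		_ = server.Stop() // bounded by the 15s shutdown timeout inside Stop()
	case err := <-errChan:
		logger.Errorf("API server exited: %s", err)
	}
}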
src/jobservice/common/query/q.go (new file, 52 lines)

@@ -0,0 +1,52 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package query
+
+const (
+	// DefaultPageSize defines the default page size
+	DefaultPageSize uint = 25
+	// ParamKeyPage defines query param key of page number
+	ParamKeyPage = "page_number"
+	// ParamKeyPageSize defines query param key of page size
+	ParamKeyPageSize = "page_size"
+	// ParamKeyNonStoppedOnly defines query param key of querying non stopped periodic executions
+	ParamKeyNonStoppedOnly = "non_dead_only"
+	// ExtraParamKeyNonStoppedOnly defines extra parameter key for querying non stopped periodic executions
+	ExtraParamKeyNonStoppedOnly = "NonDeadOnly"
+)
+
+// ExtraParameters to keep non pagination query parameters
+type ExtraParameters map[string]interface{}
+
+// Set extra parameters
+func (ep ExtraParameters) Set(key string, v interface{}) {
+	if len(key) > 0 {
+		ep[key] = v
+	}
+}
+
+// Get the extra parameter by key
+func (ep ExtraParameters) Get(key string) (interface{}, bool) {
+	v, ok := ep[key]
+
+	return v, ok
+}
+
+// Parameter for getting executions
+type Parameter struct {
+	PageNumber uint
+	PageSize   uint
+	Extras     ExtraParameters
+}
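A quick usage sketch for the new query package, assuming only what the file above defines:

package query_test

import (
	"fmt"

	"github.com/goharbor/harbor/src/jobservice/common/query"
)

func ExampleParameter() {
	q := &query.Parameter{
		PageNumber: 2,
		PageSize:   query.DefaultPageSize,
		Extras:     make(query.ExtraParameters),
	}
	q.Extras.Set(query.ExtraParamKeyNonStoppedOnly, true)

	if v, ok := q.Extras.Get(query.ExtraParamKeyNonStoppedOnly); ok {
		fmt.Println(q.PageNumber, q.PageSize, v.(bool))
	}
	// Output: 2 25 true
}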
src/jobservice/common/query/q_test.go (new file, 29 lines)

@@ -0,0 +1,29 @@
+package query
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/stretchr/testify/suite"
+)
+
+// QueryTestSuite tests q
+type QueryTestSuite struct {
+	suite.Suite
+}
+
+// TestQueryTestSuite is entry of go test
+func TestQueryTestSuite(t *testing.T) {
+	suite.Run(t, new(QueryTestSuite))
+}
+
+// TestExtraParams tests extra parameters
+func (suite *QueryTestSuite) TestExtraParams() {
+	extras := make(ExtraParameters)
+	extras.Set("a", 100)
+	v, ok := extras.Get("a")
+
+	assert.Equal(suite.T(), true, ok)
+	assert.Equal(suite.T(), 100, v.(int))
+}
@ -12,27 +12,33 @@
// See the License for the specific language governing permissions and
// limitations under the License.

package utils
package rds

import (
	"fmt"
	"math/rand"
	"strings"
	"time"
)

func generateScore() int64 {
	ticks := time.Now().Unix()
	rand := rand.New(rand.NewSource(ticks))
	return ticks + rand.Int63n(1000) // Double confirm to avoid potential duplications
// Functions defined here are mainly from the dep lib "github.com/gocraft/work".
// Only for compatibility.

// RedisNamespacePrefix ... Same as 'KeyNamespacePrefix', only for compatibility.
func RedisNamespacePrefix(namespace string) string {
	return KeyNamespacePrefix(namespace)
}

// MakePeriodicPolicyUUID returns an UUID for the periodic policy.
func MakePeriodicPolicyUUID() (string, int64) {
	score := generateScore()
	return MakeIdentifier(), score
// RedisKeyScheduled returns the key of the scheduled job.
func RedisKeyScheduled(namespace string) string {
	return RedisNamespacePrefix(namespace) + "scheduled"
}

// RedisKeyLastPeriodicEnqueue returns the key of the timestamp of the last periodic enqueue.
func RedisKeyLastPeriodicEnqueue(namespace string) string {
	return RedisNamespacePrefix(namespace) + "last_periodic_enqueue"
}

// ----------------------------------------------------------

// KeyNamespacePrefix returns the base key based on the namespace.
func KeyNamespacePrefix(namespace string) string {
	ns := strings.TrimSpace(namespace)
@ -53,16 +59,6 @@ func KeyPeriodicPolicy(namespace string) string {
	return fmt.Sprintf("%s:%s", KeyPeriod(namespace), "policies")
}

// KeyPeriodicPolicyScore returns the key of the policy key and score mapping.
func KeyPeriodicPolicyScore(namespace string) string {
	return fmt.Sprintf("%s:%s", KeyPeriod(namespace), "key_score")
}

// KeyPeriodicJobTimeSlots returns the key of the time slots of scheduled jobs.
func KeyPeriodicJobTimeSlots(namespace string) string {
	return fmt.Sprintf("%s:%s", KeyPeriod(namespace), "scheduled_slots")
}

// KeyPeriodicNotification returns the key of the periodic pub/sub channel.
func KeyPeriodicNotification(namespace string) string {
	return fmt.Sprintf("%s:%s", KeyPeriodicPolicy(namespace), "notifications")
@ -78,12 +74,17 @@ func KeyJobStats(namespace string, jobID string) string {
	return fmt.Sprintf("%s%s:%s", KeyNamespacePrefix(namespace), "job_stats", jobID)
}

// KeyJobCtlCommands returns the key for publishing ctl commands like 'stop' etc.
func KeyJobCtlCommands(namespace string, jobID string) string {
	return fmt.Sprintf("%s%s:%s", KeyNamespacePrefix(namespace), "ctl_commands", jobID)
}

// KeyUpstreamJobAndExecutions returns the key for persisting executions.
func KeyUpstreamJobAndExecutions(namespace, upstreamJobID string) string {
	return fmt.Sprintf("%s%s:%s", KeyNamespacePrefix(namespace), "executions", upstreamJobID)
}

// KeyHookEventRetryQueue returns the key of the hook event retrying queue
func KeyHookEventRetryQueue(namespace string) string {
	return fmt.Sprintf("%s%s", KeyNamespacePrefix(namespace), "hook_events")
}

// KeyStatusUpdateRetryQueue returns the key of the status change retrying queue
func KeyStatusUpdateRetryQueue(namespace string) string {
	return fmt.Sprintf("%s%s", KeyNamespacePrefix(namespace), "status_change_events")
}
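For a sense of the key layout, a small hedged sketch that prints a few of the composed keys; the namespace value is illustrative, and the exact prefix shape depends on the full body of KeyNamespacePrefix, which is truncated in this hunk:

	package main

	import (
		"fmt"

		"github.com/goharbor/harbor/src/jobservice/common/rds"
	)

	func main() {
		ns := "harbor_job_service" // illustrative namespace

		// Each helper appends its own suffix to the common namespace prefix.
		fmt.Println(rds.RedisKeyScheduled(ns))          // ...scheduled
		fmt.Println(rds.KeyJobStats(ns, "some_job_id")) // ...job_stats:some_job_id
		fmt.Println(rds.KeyStatusUpdateRetryQueue(ns))  // ...status_change_events
	}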
156
src/jobservice/common/rds/utils.go
Normal file
@ -0,0 +1,156 @@
package rds

import (
	"fmt"
	"time"

	"github.com/garyburd/redigo/redis"
	"github.com/goharbor/harbor/src/jobservice/common/utils"
	"github.com/pkg/errors"
)

// ErrNoElements is a predefined error describing the case that no elements were
// fetched from the backend database.
var ErrNoElements = errors.New("no elements got from the backend")

// HmSet sets the properties of the hash map
func HmSet(conn redis.Conn, key string, fieldAndValues ...interface{}) error {
	if conn == nil {
		return errors.New("nil redis connection")
	}

	if utils.IsEmptyStr(key) {
		return errors.New("no key specified to do HMSET")
	}

	if len(fieldAndValues) == 0 {
		return errors.New("no properties specified to do HMSET")
	}

	args := make([]interface{}, 0, len(fieldAndValues)+2)

	args = append(args, key)
	args = append(args, fieldAndValues...)
	args = append(args, "update_time", time.Now().Unix()) // Add update timestamp

	_, err := conn.Do("HMSET", args...)

	return err
}

// HmGet gets the values of multiple fields.
// Values have the same order as the provided fields.
func HmGet(conn redis.Conn, key string, fields ...interface{}) ([]interface{}, error) {
	if conn == nil {
		return nil, errors.New("nil redis connection")
	}

	if utils.IsEmptyStr(key) {
		return nil, errors.New("no key specified to do HMGET")
	}

	if len(fields) == 0 {
		return nil, errors.New("no fields specified to do HMGET")
	}

	args := make([]interface{}, 0, len(fields)+1)
	args = append(args, key)
	args = append(args, fields...)

	return redis.Values(conn.Do("HMGET", args...))
}

// JobScore represents the data item with a score in the redis db.
type JobScore struct {
	JobBytes []byte
	Score    int64
}

// GetZsetByScore gets the items from the zset filtered by the specified score scope.
func GetZsetByScore(conn redis.Conn, key string, scores []int64) ([]JobScore, error) {
	if conn == nil {
		return nil, errors.New("nil redis conn when getting zset by score")
	}

	if utils.IsEmptyStr(key) {
		return nil, errors.New("missing key when getting zset by score")
	}

	if len(scores) < 2 {
		return nil, errors.New("bad arguments: not enough scope scores provided")
	}

	values, err := redis.Values(conn.Do("ZRANGEBYSCORE", key, scores[0], scores[1], "WITHSCORES"))
	if err != nil {
		return nil, err
	}

	var jobsWithScores []JobScore

	if err := redis.ScanSlice(values, &jobsWithScores); err != nil {
		return nil, err
	}

	return jobsWithScores, nil
}

// AcquireLock acquires a redis lock with the specified expire time
func AcquireLock(conn redis.Conn, lockerKey string, lockerID string, expireTime int64) error {
	args := []interface{}{lockerKey, lockerID, "NX", "EX", expireTime}
	res, err := conn.Do("SET", args...)
	if err != nil {
		return err
	}
	// Key already exists; the value cannot be overridden
	if res == nil {
		return fmt.Errorf("key %s is already set with value %v", lockerKey, lockerID)
	}

	return nil
}

// ReleaseLock releases the acquired lock
func ReleaseLock(conn redis.Conn, lockerKey string, lockerID string) error {
	theID, err := redis.String(conn.Do("GET", lockerKey))
	if err != nil {
		return err
	}

	if theID == lockerID {
		_, err := conn.Do("DEL", lockerKey)
		return err
	}

	return errors.New("locker ID mismatch")
}

// ZPopMin pops the element with the lowest score in the zset
func ZPopMin(conn redis.Conn, key string) (interface{}, error) {
	err := conn.Send("MULTI")
	err = conn.Send("ZRANGE", key, 0, 0) // lowest one
	err = conn.Send("ZREMRANGEBYRANK", key, 0, 0)
	if err != nil {
		return nil, err
	}

	replies, err := redis.Values(conn.Do("EXEC"))
	if err != nil {
		return nil, err
	}

	if len(replies) < 2 {
		return nil, errors.Errorf("zpopmin error: not enough results returned, expected %d but got %d", 2, len(replies))
	}

	zrangeReply := replies[0]
	if zrangeReply != nil {
		if elements, ok := zrangeReply.([]interface{}); ok {
			if len(elements) == 0 {
				return nil, ErrNoElements
			}

			return elements[0], nil
		}
	}

	return nil, errors.New("zpopmin error: bad result reply")
}
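A minimal usage sketch of the lock helpers above; the redis address, locker key, and critical section are hypothetical. Note that ReleaseLock does a non-atomic GET-then-DEL, so in principle the lock could expire between the two calls:

	package main

	import (
		"log"

		"github.com/garyburd/redigo/redis"
		"github.com/goharbor/harbor/src/jobservice/common/rds"
		"github.com/goharbor/harbor/src/jobservice/common/utils"
	)

	func main() {
		// Hypothetical redis address; adjust for your environment.
		conn, err := redis.Dial("tcp", "localhost:6379")
		if err != nil {
			log.Fatal(err)
		}
		defer conn.Close()

		lockKey := "sample:lock" // illustrative key
		lockID := utils.MakeIdentifier()

		// SET key id NX EX 60: succeeds only if the key is absent; redis
		// expires it after 60s so a crashed holder cannot block forever.
		if err := rds.AcquireLock(conn, lockKey, lockID, 60); err != nil {
			log.Printf("lock busy: %v", err)
			return
		}
		defer func() {
			// Only the holder with the matching lockID may delete the key.
			_ = rds.ReleaseLock(conn, lockKey, lockID)
		}()

		// ... critical section ...
	}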
139
src/jobservice/common/rds/utils_test.go
Normal file
@ -0,0 +1,139 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package rds

import (
	"encoding/json"
	"testing"
	"time"

	"github.com/goharbor/harbor/src/jobservice/tests"
	"github.com/gomodule/redigo/redis"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"
)

// For testing
type simpleStatusChange struct {
	JobID string
}

// RdsUtilsTestSuite tests functions located in the rds package
type RdsUtilsTestSuite struct {
	suite.Suite

	pool      *redis.Pool
	namespace string
	conn      redis.Conn
}

// SetupSuite prepares the test suite
func (suite *RdsUtilsTestSuite) SetupSuite() {
	suite.pool = tests.GiveMeRedisPool()
	suite.namespace = tests.GiveMeTestNamespace()
}

// SetupTest prepares each test case
func (suite *RdsUtilsTestSuite) SetupTest() {
	suite.conn = suite.pool.Get()
}

// TearDownTest cleans up after each test case
func (suite *RdsUtilsTestSuite) TearDownTest() {
	err := suite.conn.Close()
	assert.NoError(suite.T(), err, "close conn: nil error expected but got %s", err)
}

// TearDownSuite cleans up the test suite
func (suite *RdsUtilsTestSuite) TearDownSuite() {
	conn := suite.pool.Get()
	defer func() {
		_ = conn.Close()
	}()

	err := tests.ClearAll(suite.namespace, conn)
	assert.NoError(suite.T(), err, "clear all: nil error expected but got %s", err)
}

// TestZPopMin ...
func (suite *RdsUtilsTestSuite) TestZPopMin() {
	s1 := &simpleStatusChange{"a"}
	s2 := &simpleStatusChange{"b"}

	raw1, _ := json.Marshal(s1)
	raw2, _ := json.Marshal(s2)

	key := KeyStatusUpdateRetryQueue(suite.namespace)
	_, err := suite.conn.Do("ZADD", key, time.Now().Unix(), raw1)
	_, err = suite.conn.Do("ZADD", key, time.Now().Unix()+5, raw2)
	require.Nil(suite.T(), err, "zadd objects error should be nil")

	v, err := ZPopMin(suite.conn, key)
	require.Nil(suite.T(), err, "nil error should be returned by calling ZPopMin")

	change1 := &simpleStatusChange{}
	_ = json.Unmarshal(v.([]byte), change1)
	assert.Equal(suite.T(), "a", change1.JobID, "job ID not equal")

	v, err = ZPopMin(suite.conn, key)
	require.Nil(suite.T(), err, "nil error should be returned by calling ZPopMin")

	change2 := &simpleStatusChange{}
	_ = json.Unmarshal(v.([]byte), change2)
	assert.Equal(suite.T(), "b", change2.JobID, "job ID not equal")
}

// TestHmGetAndSet ...
func (suite *RdsUtilsTestSuite) TestHmGetAndSet() {
	key := KeyJobStats(suite.namespace, "fake_job_id")
	err := HmSet(suite.conn, key, "a", "hello", "b", 100)
	require.Nil(suite.T(), err, "nil error should be returned for HmSet")

	values, err := HmGet(suite.conn, key, "a", "b")
	require.Nil(suite.T(), err, "nil error should be returned for HmGet")
	assert.Equal(suite.T(), 2, len(values), "two values should be returned")
	assert.Equal(suite.T(), string(values[0].([]byte)), "hello")
	assert.Equal(suite.T(), string(values[1].([]byte)), "100")
}

// TestAcquireAndReleaseLock ...
func (suite *RdsUtilsTestSuite) TestAcquireAndReleaseLock() {
	key := KeyPeriodicLock(suite.namespace)
	err := AcquireLock(suite.conn, key, "RdsUtilsTestSuite", 60)
	assert.Nil(suite.T(), err, "nil error should be returned for 1st acquiring lock")

	err = AcquireLock(suite.conn, key, "RdsUtilsTestSuite", 60)
	assert.NotNil(suite.T(), err, "non-nil error should be returned for 2nd acquiring lock")

	err = ReleaseLock(suite.conn, key, "RdsUtilsTestSuite")
	assert.Nil(suite.T(), err, "nil error should be returned for releasing lock")
}

// TestGetZsetByScore ...
func (suite *RdsUtilsTestSuite) TestGetZsetByScore() {
	key := KeyPeriod(suite.namespace)

	count, err := suite.conn.Do("ZADD", key, 1, "hello", 2, "world")
	require.Nil(suite.T(), err, "nil error should be returned when adding prepared data by ZADD")
	require.Equal(suite.T(), int64(2), count.(int64), "two items should be added")

	datas, err := GetZsetByScore(suite.conn, key, []int64{0, 2})
	require.Nil(suite.T(), err, "nil error should be returned when getting data with scores")
	assert.Equal(suite.T(), 2, len(datas), "expected 2 items but got %d", len(datas))
}

// TestRdsUtilsTestSuite is the suite entry for 'go test'
func TestRdsUtilsTestSuite(t *testing.T) {
	suite.Run(t, new(RdsUtilsTestSuite))
}
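The MULTI/ZRANGE/ZREMRANGEBYRANK transaction in ZPopMin emulates the native ZPOPMIN command, which only became available in Redis 5.0; on a 5.0+ server the same pop could be a single atomic call. A minimal sketch under that assumption, using the same redigo conventions as the file above:

	// zPopMinNative is a hypothetical alternative to ZPopMin for Redis >= 5.0,
	// where ZPOPMIN atomically returns and removes the lowest-scored member.
	func zPopMinNative(conn redis.Conn, key string) (interface{}, error) {
		// Reply shape with a count of 1: [member, score]
		values, err := redis.Values(conn.Do("ZPOPMIN", key, 1))
		if err != nil {
			return nil, err
		}
		if len(values) == 0 {
			return nil, ErrNoElements
		}
		return values[0], nil
	}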
@ -16,24 +16,37 @@
package utils

import (
	"errors"
	"crypto/rand"
	"encoding/json"
	"fmt"
	"github.com/gocraft/work"
	"github.com/pkg/errors"
	"io"
	"net"
	"net/url"
	"os"
	"strconv"
	"strings"

	"github.com/gomodule/redigo/redis"
)

// CtlContextKey is used to keep the controller reference in the system context
type CtlContextKey string
// NodeIDContextKey is used to keep the node ID in the system context
type NodeIDContextKey string

const (
	// CtlKeyOfLaunchJobFunc is the context key to keep the ctl launch job func
	CtlKeyOfLaunchJobFunc CtlContextKey = "controller_launch_job_func"
	// NodeID is the const of the ID context key
	NodeID NodeIDContextKey = "node_id"
)

// MakeIdentifier creates a uuid for the job.
func MakeIdentifier() string {
	b := make([]byte, 12)
	_, err := io.ReadFull(rand.Reader, b)
	if err != nil {
		return ""
	}
	return fmt.Sprintf("%x", b)
}

// IsEmptyStr checks if the specified str is empty (len == 0) after trimming prefix and suffix spaces.
func IsEmptyStr(str string) bool {
	return len(strings.TrimSpace(str)) == 0
@ -105,7 +118,7 @@ func TranslateRedisAddress(commaFormat string) (string, bool) {
		return "", false
	}

	urlParts := []string{}
	urlParts := make([]string, 0)
	// section[0] should be host:port
	redisURL := fmt.Sprintf("redis://%s", sections[0])
	if _, err := url.Parse(redisURL); err != nil {
@ -127,31 +140,48 @@ func TranslateRedisAddress(commaFormat string) (string, bool) {
	return strings.Join(urlParts, ""), true
}

// JobScore represents the data item with a score in the redis db.
type JobScore struct {
	JobBytes []byte
	Score    int64
// SerializeJob encodes work.Job to json data.
func SerializeJob(job *work.Job) ([]byte, error) {
	return json.Marshal(job)
}

// GetZsetByScore gets the items from the zset filtered by the specified score scope.
func GetZsetByScore(pool *redis.Pool, key string, scores []int64) ([]JobScore, error) {
	if pool == nil || IsEmptyStr(key) || len(scores) < 2 {
		return nil, errors.New("bad arguments")
	}
// DeSerializeJob decodes bytes to a ptr of work.Job.
func DeSerializeJob(jobBytes []byte) (*work.Job, error) {
	var j work.Job
	err := json.Unmarshal(jobBytes, &j)

	conn := pool.Get()
	defer conn.Close()
	return &j, err
}

	values, err := redis.Values(conn.Do("ZRANGEBYSCORE", key, scores[0], scores[1], "WITHSCORES"))
// ResolveHostnameAndIP gets the local hostname and IP
func ResolveHostnameAndIP() (string, error) {
	host, err := os.Hostname()
	if err != nil {
		return nil, err
		return "", err
	}

	var jobsWithScores []JobScore

	if err := redis.ScanSlice(values, &jobsWithScores); err != nil {
		return nil, err
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return "", err
	}

	return jobsWithScores, nil
	for _, address := range addrs {
		if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
			if ipnet.IP.To4() != nil {
				return fmt.Sprintf("%s:%s", host, ipnet.IP.String()), nil
			}
		}
	}

	return "", errors.New("failed to resolve local host&ip")
}

// GenerateNodeID returns the ID of the current node
func GenerateNodeID() string {
	hIP, err := ResolveHostnameAndIP()
	if err != nil {
		return MakeIdentifier()
	}

	return hIP
}
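A quick round-trip sketch of the serialization helpers above; the job name is illustrative and only the ID and Name fields of the gocraft/work job are set:

	package main

	import (
		"fmt"
		"log"

		"github.com/gocraft/work"
		"github.com/goharbor/harbor/src/jobservice/common/utils"
	)

	func main() {
		// Illustrative job; ID and Name are the only fields populated.
		j := &work.Job{
			ID:   utils.MakeIdentifier(),
			Name: "SAMPLE",
		}

		raw, err := utils.SerializeJob(j)
		if err != nil {
			log.Fatal(err)
		}

		j2, err := utils.DeSerializeJob(raw)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(j2.Name) // prints "SAMPLE"
	}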
@ -10,7 +10,7 @@ https_config:
  #Server listening port
  port: 9443

#Worker pool
#Worker
worker_pool:
  #Worker concurrency
  workers: 10
@ -29,17 +29,14 @@ job_loggers:
  - name: "FILE"
    level: "DEBUG"
    settings: # Customized settings of logger
      base_dir: "/tmp/job_logs"
      base_dir: "/Users/szou/tmp/job_logs"
    sweeper:
      duration: 1 #days
      settings: # Customized settings of sweeper
        work_dir: "/tmp/job_logs"
        work_dir: "/Users/szou/tmp/job_logs"

#Loggers for the job service
loggers:
  - name: "STD_OUTPUT" # Same with above
    level: "DEBUG"

#Admin server endpoint
admin_server: "http://adminserver:9010/"
@ -23,21 +23,20 @@ import (
	"strconv"
	"strings"

	"github.com/goharbor/harbor/src/jobservice/utils"
	yaml "gopkg.in/yaml.v2"
	"github.com/goharbor/harbor/src/jobservice/common/utils"
	"gopkg.in/yaml.v2"
)

const (
	jobServiceProtocol           = "JOB_SERVICE_PROTOCOL"
	jobServicePort               = "JOB_SERVICE_PORT"
	jobServiceHTTPCert           = "JOB_SERVICE_HTTPS_CERT"
	jobServiceHTTPKey            = "JOB_SERVICE_HTTPS_KEY"
	jobServiceWorkerPoolBackend  = "JOB_SERVICE_POOL_BACKEND"
	jobServiceWorkers            = "JOB_SERVICE_POOL_WORKERS"
	jobServiceRedisURL           = "JOB_SERVICE_POOL_REDIS_URL"
	jobServiceRedisNamespace     = "JOB_SERVICE_POOL_REDIS_NAMESPACE"
	jobServiceCoreServerEndpoint = "CORE_URL"
	jobServiceAuthSecret         = "JOBSERVICE_SECRET"
	jobServiceProtocol          = "JOB_SERVICE_PROTOCOL"
	jobServicePort              = "JOB_SERVICE_PORT"
	jobServiceHTTPCert          = "JOB_SERVICE_HTTPS_CERT"
	jobServiceHTTPKey           = "JOB_SERVICE_HTTPS_KEY"
	jobServiceWorkerPoolBackend = "JOB_SERVICE_POOL_BACKEND"
	jobServiceWorkers           = "JOB_SERVICE_POOL_WORKERS"
	jobServiceRedisURL          = "JOB_SERVICE_POOL_REDIS_URL"
	jobServiceRedisNamespace    = "JOB_SERVICE_POOL_REDIS_NAMESPACE"
	jobServiceAuthSecret        = "JOBSERVICE_SECRET"

	// JobServiceProtocolHTTPS points to the 'https' protocol
	JobServiceProtocolHTTPS = "https"
@ -68,7 +67,7 @@ type Configuration struct {
	// Additional config when using https
	HTTPSConfig *HTTPSConfig `yaml:"https_config,omitempty"`

	// Configurations of worker pool
	// Configurations of the worker
	PoolConfig *PoolConfig `yaml:"worker_pool,omitempty"`

	// Job logger configurations
@ -84,13 +83,13 @@ type HTTPSConfig struct {
	Key string `yaml:"key"`
}

// RedisPoolConfig keeps redis pool info.
// RedisPoolConfig keeps redis worker info.
type RedisPoolConfig struct {
	RedisURL  string `yaml:"redis_url"`
	Namespace string `yaml:"namespace"`
}

// PoolConfig keeps worker pool configurations.
// PoolConfig keeps worker configurations.
type PoolConfig struct {
	// Worker concurrency
	WorkerCount uint `yaml:"workers"`
@ -274,32 +273,32 @@ func (c *Configuration) validate() error {
	}

	if c.PoolConfig == nil {
		return errors.New("no worker pool is configured")
		return errors.New("no worker is configured")
	}

	if c.PoolConfig.Backend != JobServicePoolBackendRedis {
		return fmt.Errorf("worker pool backend %s does not support", c.PoolConfig.Backend)
		return fmt.Errorf("worker backend %s is not supported", c.PoolConfig.Backend)
	}

	// When backend is redis
	if c.PoolConfig.Backend == JobServicePoolBackendRedis {
		if c.PoolConfig.RedisPoolCfg == nil {
			return fmt.Errorf("redis pool must be configured when backend is set to '%s'", c.PoolConfig.Backend)
			return fmt.Errorf("redis worker must be configured when backend is set to '%s'", c.PoolConfig.Backend)
		}
		if utils.IsEmptyStr(c.PoolConfig.RedisPoolCfg.RedisURL) {
			return errors.New("URL of redis pool is empty")
			return errors.New("URL of redis worker is empty")
		}

		if !strings.HasPrefix(c.PoolConfig.RedisPoolCfg.RedisURL, redisSchema) {
			return errors.New("Invalid redis URL")
			return errors.New("invalid redis URL")
		}

		if _, err := url.Parse(c.PoolConfig.RedisPoolCfg.RedisURL); err != nil {
			return fmt.Errorf("Invalid redis URL: %s", err.Error())
			return fmt.Errorf("invalid redis URL: %s", err.Error())
		}

		if utils.IsEmptyStr(c.PoolConfig.RedisPoolCfg.Namespace) {
			return errors.New("namespace of redis pool is required")
			return errors.New("namespace of redis worker is required")
		}
	}

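A minimal sketch of loading and validating this configuration; the yaml path and import path are hypothetical, and the second argument of Load enables the env-override behaviour exercised by the tests below:

	package main

	import (
		"log"

		"github.com/goharbor/harbor/src/jobservice/config"
	)

	func main() {
		cfg := &config.Configuration{}
		// true: let JOB_SERVICE_* environment variables override yaml values.
		if err := cfg.Load("/etc/jobservice/config.yml", true); err != nil {
			log.Fatalf("load config: %v", err)
		}

		log.Printf("worker backend=%s workers=%d",
			cfg.PoolConfig.Backend, cfg.PoolConfig.WorkerCount)
	}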
@ -14,121 +14,141 @@
package config

import (
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"
)

func TestConfigLoadingFailed(t *testing.T) {
	cfg := &Configuration{}
	if err := cfg.Load("./config.not-existing.yaml", false); err == nil {
		t.Fatalf("Load config from non-existing document, expect non-nil error but got '%s'\n", err)
	}
// ConfigurationTestSuite tests the configuration loading
type ConfigurationTestSuite struct {
	suite.Suite
}

func TestConfigLoadingSucceed(t *testing.T) {
	cfg := &Configuration{}
	if err := cfg.Load("../config_test.yml", false); err != nil {
		t.Fatalf("Load config from yaml file, expect nil error but got error '%s'\n", err)
	}
// TestConfigurationTestSuite is the suite entry for 'go test'
func TestConfigurationTestSuite(t *testing.T) {
	suite.Run(t, new(ConfigurationTestSuite))
}

func TestConfigLoadingWithEnv(t *testing.T) {
	setENV()

// TestConfigLoadingFailed ...
func (suite *ConfigurationTestSuite) TestConfigLoadingFailed() {
	cfg := &Configuration{}
	if err := cfg.Load("../config_test.yml", true); err != nil {
		t.Fatalf("Load config from yaml file, expect nil error but got error '%s'\n", err)
	}

	if cfg.Protocol != "https" {
		t.Errorf("expect protocol 'https', but got '%s'\n", cfg.Protocol)
	}
	if cfg.Port != 8989 {
		t.Errorf("expect port 8989 but got '%d'\n", cfg.Port)
	}
	if cfg.PoolConfig.WorkerCount != 8 {
		t.Errorf("expect worker count 8 but got '%d'\n", cfg.PoolConfig.WorkerCount)
	}
	if cfg.PoolConfig.RedisPoolCfg.RedisURL != "redis://arbitrary_username:password@8.8.8.8:6379/0" {
		t.Errorf("expect redis URL 'localhost' but got '%s'\n", cfg.PoolConfig.RedisPoolCfg.RedisURL)
	}
	if cfg.PoolConfig.RedisPoolCfg.Namespace != "ut_namespace" {
		t.Errorf("expect redis namespace 'ut_namespace' but got '%s'\n", cfg.PoolConfig.RedisPoolCfg.Namespace)
	}
	if GetAuthSecret() != "js_secret" {
		t.Errorf("expect auth secret 'js_secret' but got '%s'", GetAuthSecret())
	}
	if GetUIAuthSecret() != "core_secret" {
		t.Errorf("expect auth secret 'core_secret' but got '%s'", GetUIAuthSecret())
	}

	unsetENV()
	err := cfg.Load("./config.not-existing.yaml", false)
	assert.NotNil(suite.T(), err, "load config from non-existing document, expect non-nil error but got nil")
}

func TestDefaultConfig(t *testing.T) {
	if err := DefaultConfig.Load("../config_test.yml", true); err != nil {
		t.Fatalf("Load config from yaml file, expect nil error but got error '%s'\n", err)
	}
// TestConfigLoadingSucceed ...
func (suite *ConfigurationTestSuite) TestConfigLoadingSucceed() {
	cfg := &Configuration{}
	err := cfg.Load("../config_test.yml", false)
	assert.Nil(suite.T(), err, "Load config from yaml file, expect nil error but got error '%s'", err)
}

// TestConfigLoadingWithEnv ...
func (suite *ConfigurationTestSuite) TestConfigLoadingWithEnv() {
	err := setENV()
	require.Nil(suite.T(), err, "set envs: expect nil error but got error '%s'", err)

	defer func() {
		err := unsetENV()
		require.Nil(suite.T(), err, "unset envs: expect nil error but got error '%s'", err)
	}()

	cfg := &Configuration{}
	err = cfg.Load("../config_test.yml", true)
	require.Nil(suite.T(), err, "load config from yaml file, expect nil error but got error '%s'", err)

	assert.Equal(suite.T(), "https", cfg.Protocol, "expect protocol 'https', but got '%s'", cfg.Protocol)
	assert.Equal(suite.T(), uint(8989), cfg.Port, "expect port 8989 but got '%d'", cfg.Port)
	assert.Equal(
		suite.T(),
		uint(8),
		cfg.PoolConfig.WorkerCount,
		"expect worker count 8 but got '%d'",
		cfg.PoolConfig.WorkerCount,
	)
	assert.Equal(
		suite.T(),
		"redis://arbitrary_username:password@8.8.8.8:6379/0",
		cfg.PoolConfig.RedisPoolCfg.RedisURL,
		"expect redis URL 'localhost' but got '%s'",
		cfg.PoolConfig.RedisPoolCfg.RedisURL,
	)
	assert.Equal(
		suite.T(),
		"ut_namespace",
		cfg.PoolConfig.RedisPoolCfg.Namespace,
		"expect redis namespace 'ut_namespace' but got '%s'",
		cfg.PoolConfig.RedisPoolCfg.Namespace,
	)
	assert.Equal(suite.T(), "js_secret", GetAuthSecret(), "expect auth secret 'js_secret' but got '%s'", GetAuthSecret())
	assert.Equal(suite.T(), "core_secret", GetUIAuthSecret(), "expect auth secret 'core_secret' but got '%s'", GetUIAuthSecret())
}

// TestDefaultConfig ...
func (suite *ConfigurationTestSuite) TestDefaultConfig() {
	err := DefaultConfig.Load("../config_test.yml", true)
	require.Nil(suite.T(), err, "load config from yaml file, expect nil error but got error '%s'", err)

	redisURL := DefaultConfig.PoolConfig.RedisPoolCfg.RedisURL
	if redisURL != "redis://localhost:6379" {
		t.Errorf("expect redisURL '%s' but got '%s'\n", "redis://localhost:6379", redisURL)
	}
	assert.Equal(suite.T(), "redis://localhost:6379", redisURL, "expect redisURL '%s' but got '%s'", "redis://localhost:6379", redisURL)

	if len(DefaultConfig.JobLoggerConfigs) == 0 {
		t.Errorf("expect 2 job loggers configured but got %d", len(DefaultConfig.JobLoggerConfigs))
	}
	jLoggerCount := len(DefaultConfig.JobLoggerConfigs)
	assert.Equal(suite.T(), 2, jLoggerCount, "expect 2 job loggers configured but got %d", jLoggerCount)

	if len(DefaultConfig.LoggerConfigs) == 0 {
		t.Errorf("expect 1 logger configured but got %d", len(DefaultConfig.LoggerConfigs))
	}
	loggerCount := len(DefaultConfig.LoggerConfigs)
	assert.Equal(suite.T(), 1, loggerCount, "expect 1 logger configured but got %d", loggerCount)

	// Only verify the complicated one
	theLogger := DefaultConfig.JobLoggerConfigs[1]
	if theLogger.Name != "FILE" {
		t.Fatalf("expect FILE logger but got %s", theLogger.Name)
	}
	if theLogger.Level != "INFO" {
		t.Errorf("expect INFO log level of FILE logger but got %s", theLogger.Level)
	}
	if len(theLogger.Settings) == 0 {
		t.Errorf("expect extra settings but got nothing")
	}
	if theLogger.Settings["base_dir"] != "/tmp/job_logs" {
		t.Errorf("expect extra setting base_dir to be '/tmp/job_logs' but got %s", theLogger.Settings["base_dir"])
	}
	if theLogger.Sweeper == nil {
		t.Fatalf("expect non-nil sweeper of FILE logger but got nil")
	}
	if theLogger.Sweeper.Duration != 5 {
		t.Errorf("expect sweep duration to be 5 but got %d", theLogger.Sweeper.Duration)
	}
	if theLogger.Sweeper.Settings["work_dir"] != "/tmp/job_logs" {
		t.Errorf("expect work dir of sweeper of FILE logger to be '/tmp/job_logs' but got %s", theLogger.Sweeper.Settings["work_dir"])
	}
	assert.Equal(suite.T(), "FILE", theLogger.Name, "expect FILE logger but got %s", theLogger.Name)
	assert.Equal(suite.T(), "INFO", theLogger.Level, "expect INFO log level of FILE logger but got %s", theLogger.Level)
	assert.NotEqual(suite.T(), 0, len(theLogger.Settings), "expect extra settings but got nothing")
	assert.Equal(
		suite.T(),
		"/tmp/job_logs",
		theLogger.Settings["base_dir"],
		"expect extra setting base_dir to be '/tmp/job_logs' but got %s",
		theLogger.Settings["base_dir"],
	)
	assert.NotNil(suite.T(), theLogger.Sweeper, "expect non-nil sweeper of FILE logger but got nil")
	assert.Equal(suite.T(), 5, theLogger.Sweeper.Duration, "expect sweep duration to be 5 but got %d", theLogger.Sweeper.Duration)
	assert.Equal(
		suite.T(),
		"/tmp/job_logs",
		theLogger.Sweeper.Settings["work_dir"],
		"expect work dir of sweeper of FILE logger to be '/tmp/job_logs' but got %s",
		theLogger.Sweeper.Settings["work_dir"],
	)
}

func setENV() {
	os.Setenv("JOB_SERVICE_PROTOCOL", "https")
	os.Setenv("JOB_SERVICE_PORT", "8989")
	os.Setenv("JOB_SERVICE_HTTPS_CERT", "../server.crt")
	os.Setenv("JOB_SERVICE_HTTPS_KEY", "../server.key")
	os.Setenv("JOB_SERVICE_POOL_BACKEND", "redis")
	os.Setenv("JOB_SERVICE_POOL_WORKERS", "8")
	os.Setenv("JOB_SERVICE_POOL_REDIS_URL", "8.8.8.8:6379,100,password,0")
	os.Setenv("JOB_SERVICE_POOL_REDIS_NAMESPACE", "ut_namespace")
	os.Setenv("JOBSERVICE_SECRET", "js_secret")
	os.Setenv("CORE_SECRET", "core_secret")
func setENV() error {
	err := os.Setenv("JOB_SERVICE_PROTOCOL", "https")
	err = os.Setenv("JOB_SERVICE_PORT", "8989")
	err = os.Setenv("JOB_SERVICE_HTTPS_CERT", "../server.crt")
	err = os.Setenv("JOB_SERVICE_HTTPS_KEY", "../server.key")
	err = os.Setenv("JOB_SERVICE_POOL_BACKEND", "redis")
	err = os.Setenv("JOB_SERVICE_POOL_WORKERS", "8")
	err = os.Setenv("JOB_SERVICE_POOL_REDIS_URL", "8.8.8.8:6379,100,password,0")
	err = os.Setenv("JOB_SERVICE_POOL_REDIS_NAMESPACE", "ut_namespace")
	err = os.Setenv("JOBSERVICE_SECRET", "js_secret")
	err = os.Setenv("CORE_SECRET", "core_secret")

	return err
}

func unsetENV() {
	os.Unsetenv("JOB_SERVICE_PROTOCOL")
	os.Unsetenv("JOB_SERVICE_PORT")
	os.Unsetenv("JOB_SERVICE_HTTPS_CERT")
	os.Unsetenv("JOB_SERVICE_HTTPS_KEY")
	os.Unsetenv("JOB_SERVICE_POOL_BACKEND")
	os.Unsetenv("JOB_SERVICE_POOL_WORKERS")
	os.Unsetenv("JOB_SERVICE_POOL_REDIS_URL")
	os.Unsetenv("JOB_SERVICE_POOL_REDIS_NAMESPACE")
	os.Unsetenv("JOBSERVICE_SECRET")
	os.Unsetenv("CORE_SECRET")
func unsetENV() error {
	err := os.Unsetenv("JOB_SERVICE_PROTOCOL")
	err = os.Unsetenv("JOB_SERVICE_PORT")
	err = os.Unsetenv("JOB_SERVICE_HTTPS_CERT")
	err = os.Unsetenv("JOB_SERVICE_HTTPS_KEY")
	err = os.Unsetenv("JOB_SERVICE_POOL_BACKEND")
	err = os.Unsetenv("JOB_SERVICE_POOL_WORKERS")
	err = os.Unsetenv("JOB_SERVICE_POOL_REDIS_URL")
	err = os.Unsetenv("JOB_SERVICE_POOL_REDIS_NAMESPACE")
	err = os.Unsetenv("JOBSERVICE_SECRET")
	err = os.Unsetenv("CORE_SECRET")

	return err
}
@ -10,7 +10,7 @@ https_config:
  #Server listening port
  port: 9444

#Worker pool
#Worker
worker_pool:
  #Worker concurrency
  workers: 10
@ -20,7 +20,7 @@ worker_pool:
  #redis://[arbitrary_username:password@]ipaddress:port/database_index
  #or ipaddress:port[,weight,password,database_index]
  redis_url: "localhost:6379"
  namespace: "harbor_job_service"
  namespace: "testing_job_service_v2"

#Loggers for the running job
job_loggers:
@ -39,7 +39,3 @@ job_loggers:
loggers:
  - name: "STD_OUTPUT" # Same with above
    level: "DEBUG"

#Admin server endpoint
admin_server: "http://127.0.0.1:8888"
@ -15,129 +15,127 @@
package core

import (
	"errors"
	"fmt"
	"github.com/pkg/errors"

	"github.com/goharbor/harbor/src/jobservice/logger"

	"github.com/goharbor/harbor/src/jobservice/common/query"
	"github.com/goharbor/harbor/src/jobservice/common/utils"
	"github.com/goharbor/harbor/src/jobservice/errs"
	"github.com/goharbor/harbor/src/jobservice/job"
	"github.com/goharbor/harbor/src/jobservice/models"
	"github.com/goharbor/harbor/src/jobservice/pool"
	"github.com/goharbor/harbor/src/jobservice/utils"
	"github.com/goharbor/harbor/src/jobservice/lcm"
	"github.com/goharbor/harbor/src/jobservice/worker"
	"github.com/robfig/cron"
)

const (
	hookActivated   = "activated"
	hookDeactivated = "error"
)

// Controller implements the core interface and provides the related job handle methods.
// Controller will coordinate the lower components to complete the process as a commander role.
type Controller struct {
	// Refer the backend pool
	backendPool pool.Interface
// basicController implements the core interface and provides the related job handle methods.
// basicController will coordinate the lower components to complete the process as a commander role.
type basicController struct {
	// Refer the backend worker
	backendWorker worker.Interface
	// Refer the job life cycle management controller
	ctl lcm.Controller
}

// NewController is the constructor of Controller.
func NewController(backendPool pool.Interface) *Controller {
	return &Controller{
		backendPool: backendPool,
// NewController is the constructor of basicController.
func NewController(backendWorker worker.Interface, ctl lcm.Controller) Interface {
	return &basicController{
		backendWorker: backendWorker,
		ctl:           ctl,
	}
}

// LaunchJob is the implementation of the same method in the core interface.
func (c *Controller) LaunchJob(req models.JobRequest) (models.JobStats, error) {
func (bc *basicController) LaunchJob(req *job.Request) (res *job.Stats, err error) {
	if err := validJobReq(req); err != nil {
		return models.JobStats{}, err
		return nil, errs.BadRequestError(err)
	}

	// Validate job name
	jobType, isKnownJob := c.backendPool.IsKnownJob(req.Job.Name)
	jobType, isKnownJob := bc.backendWorker.IsKnownJob(req.Job.Name)
	if !isKnownJob {
		return models.JobStats{}, fmt.Errorf("job with name '%s' is unknown", req.Job.Name)
		return nil, errs.BadRequestError(errors.Errorf("job with name '%s' is unknown", req.Job.Name))
	}

	// Validate parameters
	if err := c.backendPool.ValidateJobParameters(jobType, req.Job.Parameters); err != nil {
		return models.JobStats{}, err
	if err := bc.backendWorker.ValidateJobParameters(jobType, req.Job.Parameters); err != nil {
		return nil, errs.BadRequestError(err)
	}

	// Enqueue the job according to its kind
	var (
		res models.JobStats
		err error
	)
	switch req.Job.Metadata.JobKind {
	case job.JobKindScheduled:
		res, err = c.backendPool.Schedule(
	case job.KindScheduled:
		res, err = bc.backendWorker.Schedule(
			req.Job.Name,
			req.Job.Parameters,
			req.Job.Metadata.ScheduleDelay,
			req.Job.Metadata.IsUnique)
	case job.JobKindPeriodic:
		res, err = c.backendPool.PeriodicallyEnqueue(
			req.Job.Metadata.IsUnique,
			req.Job.StatusHook,
		)
	case job.KindPeriodic:
		res, err = bc.backendWorker.PeriodicallyEnqueue(
			req.Job.Name,
			req.Job.Parameters,
			req.Job.Metadata.Cron)
			req.Job.Metadata.Cron,
			req.Job.Metadata.IsUnique,
			req.Job.StatusHook,
		)
	default:
		res, err = c.backendPool.Enqueue(req.Job.Name, req.Job.Parameters, req.Job.Metadata.IsUnique)
		res, err = bc.backendWorker.Enqueue(
			req.Job.Name,
			req.Job.Parameters,
			req.Job.Metadata.IsUnique,
			req.Job.StatusHook,
		)
	}

	// Register status hook?
	// Save job stats
	if err == nil {
		if !utils.IsEmptyStr(req.Job.StatusHook) {
			if err := c.backendPool.RegisterHook(res.Stats.JobID, req.Job.StatusHook); err != nil {
				res.Stats.HookStatus = hookDeactivated
			} else {
				res.Stats.HookStatus = hookActivated
			}
		if _, err := bc.ctl.New(res); err != nil {
			return nil, err
		}
	}

	return res, err
	return
}

// GetJob is the implementation of the same method in the core interface.
func (c *Controller) GetJob(jobID string) (models.JobStats, error) {
func (bc *basicController) GetJob(jobID string) (*job.Stats, error) {
	if utils.IsEmptyStr(jobID) {
		return models.JobStats{}, errors.New("empty job ID")
		return nil, errs.BadRequestError(errors.New("empty job ID"))
	}

	return c.backendPool.GetJobStats(jobID)
	t, err := bc.ctl.Track(jobID)
	if err != nil {
		return nil, err
	}

	return t.Job(), nil
}

// StopJob is the implementation of the same method in the core interface.
func (c *Controller) StopJob(jobID string) error {
func (bc *basicController) StopJob(jobID string) error {
	if utils.IsEmptyStr(jobID) {
		return errors.New("empty job ID")
		return errs.BadRequestError(errors.New("empty job ID"))
	}

	return c.backendPool.StopJob(jobID)
}

// CancelJob is the implementation of the same method in the core interface.
func (c *Controller) CancelJob(jobID string) error {
	if utils.IsEmptyStr(jobID) {
		return errors.New("empty job ID")
	}

	return c.backendPool.CancelJob(jobID)
	return bc.backendWorker.StopJob(jobID)
}

// RetryJob is the implementation of the same method in the core interface.
func (c *Controller) RetryJob(jobID string) error {
func (bc *basicController) RetryJob(jobID string) error {
	if utils.IsEmptyStr(jobID) {
		return errors.New("empty job ID")
		return errs.BadRequestError(errors.New("empty job ID"))
	}

	return c.backendPool.RetryJob(jobID)
	return bc.backendWorker.RetryJob(jobID)
}

// GetJobLogData is used to return the log text data for the specified job if it exists
func (c *Controller) GetJobLogData(jobID string) ([]byte, error) {
func (bc *basicController) GetJobLogData(jobID string) ([]byte, error) {
	if utils.IsEmptyStr(jobID) {
		return nil, errors.New("empty job ID")
		return nil, errs.BadRequestError(errors.New("empty job ID"))
	}

	logData, err := logger.Retrieve(jobID)
@ -149,12 +147,46 @@ func (c *Controller) GetJobLogData(jobID string) ([]byte, error) {
}

// CheckStatus is the implementation of the same method in the core interface.
func (c *Controller) CheckStatus() (models.JobPoolStats, error) {
	return c.backendPool.Stats()
func (bc *basicController) CheckStatus() (*worker.Stats, error) {
	return bc.backendWorker.Stats()
}

func validJobReq(req models.JobRequest) error {
	if req.Job == nil {
// GetPeriodicExecutions gets the periodic executions for the specified periodic job
func (bc *basicController) GetPeriodicExecutions(periodicJobID string, query *query.Parameter) ([]*job.Stats, int64, error) {
	if utils.IsEmptyStr(periodicJobID) {
		return nil, 0, errs.BadRequestError(errors.New("nil periodic job ID"))
	}

	t, err := bc.ctl.Track(periodicJobID)
	if err != nil {
		return nil, 0, err
	}

	eIDs, total, err := t.Executions(query)
	if err != nil {
		return nil, 0, err
	}

	res := make([]*job.Stats, 0)
	for _, eID := range eIDs {
		et, err := bc.ctl.Track(eID)
		if err != nil {
			return nil, 0, err
		}

		res = append(res, et.Job())
	}

	return res, total, nil
}

// ScheduledJobs returns the scheduled jobs by page
func (bc *basicController) ScheduledJobs(query *query.Parameter) ([]*job.Stats, int64, error) {
	return bc.backendWorker.ScheduledJobs(query)
}

func validJobReq(req *job.Request) error {
	if req == nil || req.Job == nil {
		return errors.New("empty job request is not allowed")
	}

@ -166,29 +198,29 @@ func validJobReq(req models.JobRequest) error {
		return errors.New("metadata of job is missing")
	}

	if req.Job.Metadata.JobKind != job.JobKindGeneric &&
		req.Job.Metadata.JobKind != job.JobKindPeriodic &&
		req.Job.Metadata.JobKind != job.JobKindScheduled {
		return fmt.Errorf(
	if req.Job.Metadata.JobKind != job.KindGeneric &&
		req.Job.Metadata.JobKind != job.KindPeriodic &&
		req.Job.Metadata.JobKind != job.KindScheduled {
		return errors.Errorf(
			"job kind '%s' is not supported, only support '%s','%s','%s'",
			req.Job.Metadata.JobKind,
			job.JobKindGeneric,
			job.JobKindScheduled,
			job.JobKindPeriodic)
			job.KindGeneric,
			job.KindScheduled,
			job.KindPeriodic)
	}

	if req.Job.Metadata.JobKind == job.JobKindScheduled &&
	if req.Job.Metadata.JobKind == job.KindScheduled &&
		req.Job.Metadata.ScheduleDelay == 0 {
		return fmt.Errorf("'schedule_delay' must be specified if the job kind is '%s'", job.JobKindScheduled)
		return errors.Errorf("'schedule_delay' must be specified for the %s job", job.KindScheduled)
	}

	if req.Job.Metadata.JobKind == job.JobKindPeriodic {
	if req.Job.Metadata.JobKind == job.KindPeriodic {
		if utils.IsEmptyStr(req.Job.Metadata.Cron) {
			return fmt.Errorf("'cron_spec' must be specified if the job kind is '%s'", job.JobKindPeriodic)
			return fmt.Errorf("'cron_spec' must be specified for the %s job", job.KindPeriodic)
		}

		if _, err := cron.Parse(req.Job.Metadata.Cron); err != nil {
			return fmt.Errorf("'cron_spec' is not correctly set: %s", err)
			return fmt.Errorf("'cron_spec' is not correctly set: %s: %s", req.Job.Metadata.Cron, err)
		}
	}

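To make the request flow above concrete, a minimal sketch of building a periodic job request for LaunchJob; the cron spec and hook URL are illustrative values of the same shape the tests below use, and `ctl` is assumed to be an Interface value built via NewController(worker, lcmCtl):

	req := &job.Request{
		Job: &job.RequestBody{
			Name:       job.SampleJob,
			Parameters: job.Parameters{"name": "testing:v1"},
			StatusHook: "http://localhost:9090", // hypothetical webhook receiver
			Metadata: &job.Metadata{
				JobKind:  job.KindPeriodic,
				IsUnique: true,
				Cron:     "5 * * * * *", // every minute, at second 5
			},
		},
	}

	stats, err := ctl.LaunchJob(req)
	if err != nil {
		// Validation failures surface as errs.BadRequestError
		log.Fatal(err)
	}
	log.Println(stats.Info.JobID)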
@ -14,312 +14,404 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"context"
|
||||
"github.com/goharbor/harbor/src/jobservice/common/query"
|
||||
"github.com/goharbor/harbor/src/jobservice/common/utils"
|
||||
"github.com/goharbor/harbor/src/jobservice/job"
|
||||
"github.com/goharbor/harbor/src/jobservice/job/impl/sample"
|
||||
"github.com/goharbor/harbor/src/jobservice/tests"
|
||||
"github.com/goharbor/harbor/src/jobservice/worker"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
"testing"
|
||||
|
||||
"github.com/goharbor/harbor/src/jobservice/env"
|
||||
"github.com/goharbor/harbor/src/jobservice/models"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestLaunchGenericJob(t *testing.T) {
|
||||
pool := &fakePool{}
|
||||
c := NewController(pool)
|
||||
req := createJobReq("Generic", false, false)
|
||||
res, err := c.LaunchJob(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// ControllerTestSuite tests functions of core controller
|
||||
type ControllerTestSuite struct {
|
||||
suite.Suite
|
||||
|
||||
if res.Stats.JobID != "fake_ID" {
|
||||
t.Fatalf("expect enqueued job ID 'fake_ID' but got '%s'\n", res.Stats.JobID)
|
||||
lcmCtl *fakeLcmController
|
||||
worker *fakeWorker
|
||||
ctl Interface
|
||||
|
||||
res *job.Stats
|
||||
jobID string
|
||||
params job.Parameters
|
||||
}
|
||||
|
||||
// SetupSuite prepares test suite
|
||||
func (suite *ControllerTestSuite) SetupSuite() {
|
||||
suite.ctl = NewController(suite, suite)
|
||||
|
||||
suite.params = make(job.Parameters)
|
||||
suite.params["name"] = "testing:v1"
|
||||
|
||||
suite.jobID = utils.MakeIdentifier()
|
||||
suite.res = &job.Stats{
|
||||
Info: &job.StatsInfo{
|
||||
JobID: suite.jobID,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestLaunchGenericJobUnique(t *testing.T) {
|
||||
pool := &fakePool{}
|
||||
c := NewController(pool)
|
||||
req := createJobReq("Generic", true, false)
|
||||
res, err := c.LaunchJob(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Prepare for each test case
|
||||
func (suite *ControllerTestSuite) SetupTest() {
|
||||
suite.worker = &fakeWorker{}
|
||||
suite.lcmCtl = &fakeLcmController{}
|
||||
|
||||
if res.Stats.JobID != "fake_ID" {
|
||||
t.Fatalf("expect enqueued job ID 'fake_ID' but got '%s'\n", res.Stats.JobID)
|
||||
}
|
||||
suite.lcmCtl.On("Track", suite.jobID).Return(job.NewBasicTrackerWithStats(nil, suite.res, "ns", nil, nil), nil)
|
||||
suite.lcmCtl.On("New", suite.res).Return(job.NewBasicTrackerWithStats(nil, suite.res, "ns", nil, nil), nil)
|
||||
|
||||
suite.worker.On("IsKnownJob", job.SampleJob).Return((*sample.Job)(nil), true)
|
||||
suite.worker.On("IsKnownJob", "fake").Return(nil, false)
|
||||
suite.worker.On("ValidateJobParameters", (*sample.Job)(nil), suite.params).Return(nil)
|
||||
}
|
||||
|
||||
func TestLaunchGenericJobWithHook(t *testing.T) {
|
||||
pool := &fakePool{}
|
||||
c := NewController(pool)
|
||||
req := createJobReq("Generic", false, true)
|
||||
res, err := c.LaunchJob(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if res.Stats.JobID != "fake_ID" {
|
||||
t.Fatalf("expect enqueued job ID 'fake_ID' but got '%s'\n", res.Stats.JobID)
|
||||
}
|
||||
// TestControllerTestSuite is suite entry for 'go test'
|
||||
func TestControllerTestSuite(t *testing.T) {
|
||||
suite.Run(t, new(ControllerTestSuite))
|
||||
}
|
||||
|
||||
func TestLaunchScheduledJob(t *testing.T) {
|
||||
pool := &fakePool{}
|
||||
c := NewController(pool)
|
||||
req := createJobReq("Scheduled", false, true)
|
||||
res, err := c.LaunchJob(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// SetupSuite prepares test suite
|
||||
func (suite *ControllerTestSuite) TestLaunchGenericJob() {
|
||||
req := createJobReq("Generic")
|
||||
|
||||
if res.Stats.JobID != "fake_ID_Scheduled" {
|
||||
t.Fatalf("expect enqueued job ID 'fake_ID_Scheduled' but got '%s'\n", res.Stats.JobID)
|
||||
}
|
||||
suite.worker.On("Enqueue", job.SampleJob, suite.params, true, req.Job.StatusHook).Return(suite.res, nil)
|
||||
|
||||
res, err := suite.ctl.LaunchJob(req)
|
||||
require.Nil(suite.T(), err, "launch job: nil error expected but got %s", err)
|
||||
assert.Equal(suite.T(), suite.jobID, res.Info.JobID, "mismatch job ID")
|
||||
}
|
||||
|
||||
func TestLaunchScheduledUniqueJob(t *testing.T) {
|
||||
pool := &fakePool{}
|
||||
c := NewController(pool)
|
||||
req := createJobReq("Scheduled", true, false)
|
||||
res, err := c.LaunchJob(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// TestLaunchScheduledJob ...
|
||||
func (suite *ControllerTestSuite) TestLaunchScheduledJob() {
|
||||
req := createJobReq("Scheduled")
|
||||
|
||||
if res.Stats.JobID != "fake_ID_Scheduled" {
|
||||
t.Fatalf("expect enqueued job ID 'fake_ID_Scheduled' but got '%s'\n", res.Stats.JobID)
|
||||
}
|
||||
suite.worker.On("Schedule", job.SampleJob, suite.params, uint64(100), true, req.Job.StatusHook).Return(suite.res, nil)
|
||||
|
||||
res, err := suite.ctl.LaunchJob(req)
|
||||
require.Nil(suite.T(), err, "launch scheduled job: nil error expected but got %s", err)
|
||||
assert.Equal(suite.T(), suite.jobID, res.Info.JobID, "mismatch job ID")
|
||||
}
|
||||
|
||||
func TestLaunchPeriodicJob(t *testing.T) {
|
||||
pool := &fakePool{}
|
||||
c := NewController(pool)
|
||||
req := createJobReq("Periodic", true, false)
|
||||
res, err := c.LaunchJob(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// TestLaunchPeriodicJob ...
|
||||
func (suite *ControllerTestSuite) TestLaunchPeriodicJob() {
|
||||
req := createJobReq("Periodic")
|
||||
|
||||
if res.Stats.JobID != "fake_ID_Periodic" {
|
||||
t.Fatalf("expect enqueued job ID 'fake_ID_Periodic' but got '%s'\n", res.Stats.JobID)
|
||||
}
|
||||
suite.worker.On("PeriodicallyEnqueue", job.SampleJob, suite.params, "5 * * * * *", true, req.Job.StatusHook).Return(suite.res, nil)
|
||||
|
||||
res, err := suite.ctl.LaunchJob(req)
|
||||
require.Nil(suite.T(), err, "launch periodic job: nil error expected but got %s", err)
|
||||
assert.Equal(suite.T(), suite.jobID, res.Info.JobID, "mismatch job ID")
|
||||
}
|
||||
|
||||
func TestGetJobStats(t *testing.T) {
|
||||
pool := &fakePool{}
|
||||
c := NewController(pool)
|
||||
stats, err := c.GetJob("fake_ID")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if stats.Stats.Status != "running" {
|
||||
t.Fatalf("expect stauts 'running' but got '%s'\n", stats.Stats.Status)
|
||||
}
|
||||
// TestGetJobStats ...
|
||||
func (suite *ControllerTestSuite) TestGetJobStats() {
|
||||
res, err := suite.ctl.GetJob(suite.jobID)
|
||||
require.Nil(suite.T(), err, "get job stats: nil error expected but got %s", err)
|
||||
assert.Equal(suite.T(), suite.jobID, res.Info.JobID, "mismatch job ID")
|
||||
}
|
||||
|
||||
func TestJobActions(t *testing.T) {
|
||||
pool := &fakePool{}
|
||||
c := NewController(pool)
|
||||
// TestJobActions ...
|
||||
func (suite *ControllerTestSuite) TestJobActions() {
|
||||
suite.worker.On("StopJob", suite.jobID).Return(nil)
|
||||
suite.worker.On("RetryJob", suite.jobID).Return(nil)
|
||||
|
||||
if err := c.StopJob("fake_ID"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err := suite.ctl.StopJob(suite.jobID)
|
||||
err = suite.ctl.RetryJob(suite.jobID)
|
||||
|
||||
if err := c.CancelJob("fake_ID"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := c.RetryJob("fake_ID"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
assert.Nil(suite.T(), err, "job action: nil error expected but got %s", err)
|
||||
}
|
||||
|
||||
func TestGetJobLogData(t *testing.T) {
|
||||
pool := &fakePool{}
|
||||
c := NewController(pool)
|
||||
|
||||
if _, err := c.GetJobLogData("fake_ID"); err == nil {
|
||||
t.Fatal("expect error but got nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckStatus(t *testing.T) {
|
||||
pool := &fakePool{}
|
||||
c := NewController(pool)
|
||||
|
||||
st, err := c.CheckStatus()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(st.Pools) == 0 {
|
||||
t.Fatal("expect status data but got zero list")
|
||||
}
|
||||
|
||||
if st.Pools[0].Status != "running" {
|
||||
t.Fatalf("expect status 'running' but got '%s'\n", st.Pools[0].Status)
|
||||
}
|
||||
}
|
||||
|
||||
func TestInvalidCheck(t *testing.T) {
|
||||
pool := &fakePool{}
|
||||
c := NewController(pool)
|
||||
|
||||
req := models.JobRequest{
|
||||
Job: &models.JobData{
|
||||
Name: "DEMO",
|
||||
Metadata: &models.JobMetadata{
|
||||
JobKind: "kind",
|
||||
// TestCheckStatus ...
|
||||
func (suite *ControllerTestSuite) TestCheckStatus() {
|
||||
suite.worker.On("Stats").Return(&worker.Stats{
|
||||
Pools: []*worker.StatsData{
|
||||
{
|
||||
Status: "running",
|
||||
},
|
||||
},
|
||||
}, nil)
|
||||
|
||||
st, err := suite.ctl.CheckStatus()
|
||||
require.Nil(suite.T(), err, "check worker status: nil error expected but got %s", err)
|
||||
assert.Equal(suite.T(), 1, len(st.Pools), "expected 1 pool status but got 0")
|
||||
assert.Equal(suite.T(), "running", st.Pools[0].Status, "expected running pool but got %s", st.Pools[0].Status)
|
||||
}
|
||||
|
||||
// TestScheduledJobs ...
|
||||
func (suite *ControllerTestSuite) TestScheduledJobs() {
|
||||
q := &query.Parameter{
|
||||
PageSize: 20,
|
||||
PageNumber: 1,
|
||||
}
|
||||
|
||||
suite.worker.On("ScheduledJobs", q).Return([]*job.Stats{suite.res}, 1, nil)
|
||||
|
||||
_, total, err := suite.ctl.ScheduledJobs(q)
|
||||
require.Nil(suite.T(), err, "scheduled jobs: nil error expected but got %s", err)
|
||||
assert.Equal(suite.T(), int64(1), total, "expected 1 item but got 0")
|
||||
}
|
||||
|
||||
// TestInvalidChecks ...
|
||||
func (suite *ControllerTestSuite) TestInvalidChecks() {
|
||||
req := createJobReq("kind")
|
||||
|
||||
_, err := suite.ctl.LaunchJob(req)
|
||||
assert.NotNil(suite.T(), err, "invalid job kind: error expected but got nil")
|
||||
|
||||
req.Job.Metadata.JobKind = job.KindGeneric
|
||||
req.Job.Name = "fake"
|
||||
_, err = suite.ctl.LaunchJob(req)
|
||||
assert.NotNil(suite.T(), err, "invalid job name: error expected but got nil")
|
||||
|
||||
req.Job.Metadata.JobKind = job.KindScheduled
|
||||
req.Job.Name = job.SampleJob
|
||||
req.Job.Metadata.ScheduleDelay = 0
|
||||
_, err = suite.ctl.LaunchJob(req)
|
||||
assert.NotNil(suite.T(), err, "invalid scheduled job: error expected but got nil")
|
||||
|
||||
req.Job.Metadata.JobKind = job.KindPeriodic
|
||||
req.Job.Metadata.Cron = "x x x x x x"
|
||||
_, err = suite.ctl.LaunchJob(req)
|
||||
assert.NotNil(suite.T(), err, "invalid job name: error expected but got nil")
|
||||
}
|
||||
|
||||
// TestGetPeriodicExecutions tests GetPeriodicExecutions
func (suite *ControllerTestSuite) TestGetPeriodicExecutions() {
    pool := tests.GiveMeRedisPool()
    namespace := tests.GiveMeTestNamespace()

    jobID := utils.MakeIdentifier()
    nID := time.Now().Unix()
    mockJobStats := &job.Stats{
        Info: &job.StatsInfo{
            JobID:      jobID,
            Status:     job.ScheduledStatus.String(),
            JobKind:    job.KindPeriodic,
            JobName:    job.SampleJob,
            IsUnique:   false,
            CronSpec:   "0 0 * * * *",
            NumericPID: nID,
        },
    }

    if _, err := c.LaunchJob(req); err == nil {
        t.Fatal("error expected but got nil")
    t := job.NewBasicTrackerWithStats(context.TODO(), mockJobStats, namespace, pool, nil)
    err := t.Save()
    require.NoError(suite.T(), err)

    executionID := utils.MakeIdentifier()
    runAt := time.Now().Add(1 * time.Hour).Unix()
    executionStats := &job.Stats{
        Info: &job.StatsInfo{
            JobID:         executionID,
            Status:        job.ScheduledStatus.String(),
            JobKind:       job.KindScheduled,
            JobName:       job.SampleJob,
            IsUnique:      false,
            CronSpec:      "0 0 * * * *",
            RunAt:         runAt,
            EnqueueTime:   runAt,
            UpstreamJobID: jobID,
        },
    }

    req.Job.Name = "fake"
    if _, err := c.LaunchJob(req); err == nil {
        t.Fatal("error expected but got nil")
    }
    t2 := job.NewBasicTrackerWithStats(context.TODO(), executionStats, namespace, pool, nil)
    err = t2.Save()
    require.NoError(suite.T(), err)

    req.Job.Metadata.JobKind = "Scheduled"
    if _, err := c.LaunchJob(req); err == nil {
        t.Fatal("error expected but got nil")
    }
    suite.lcmCtl.On("Track", jobID).Return(t, nil)
    suite.lcmCtl.On("Track", executionID).Return(t2, nil)

    req.Job.Metadata.JobKind = "Periodic"
    req.Job.Metadata.Cron = "x x x x x x"
    if _, err := c.LaunchJob(req); err == nil {
        t.Fatal("error expected but got nil")
    }
    _, total, err := suite.ctl.GetPeriodicExecutions(jobID, &query.Parameter{
        PageSize:   10,
        PageNumber: 1,
        Extras:     make(query.ExtraParameters),
    })
    require.NoError(suite.T(), err)
    assert.Equal(suite.T(), int64(1), total)
}

func createJobReq(kind string, isUnique bool, withHook bool) models.JobRequest {
    params := make(map[string]interface{})
    params["name"] = "testing"
    req := models.JobRequest{
        Job: &models.JobData{
            Name: "DEMO",
func createJobReq(kind string) *job.Request {
    params := make(job.Parameters)
    params["name"] = "testing:v1"
    return &job.Request{
        Job: &job.RequestBody{
            Name:       job.SampleJob,
            Parameters: params,
            Metadata: &models.JobMetadata{
                StatusHook: "http://localhost:9090",
            Metadata: &job.Metadata{
                JobKind:       kind,
                IsUnique:      isUnique,
                IsUnique:      true,
                ScheduleDelay: 100,
                Cron:          "5 * * * * *",
            },
        },
    }
}

    if withHook {
        req.Job.StatusHook = "http://localhost:9090"

// Implement lcm controller interface
func (suite *ControllerTestSuite) Serve() error {
    return suite.lcmCtl.Serve()
}

func (suite *ControllerTestSuite) New(stats *job.Stats) (job.Tracker, error) {
    return suite.lcmCtl.New(stats)
}

func (suite *ControllerTestSuite) Track(jobID string) (job.Tracker, error) {
    return suite.lcmCtl.Track(jobID)
}

// Implement worker interface
func (suite *ControllerTestSuite) Start() error {
    return suite.worker.Start()
}

func (suite *ControllerTestSuite) RegisterJobs(jobs map[string]interface{}) error {
    return suite.worker.RegisterJobs(jobs)
}

func (suite *ControllerTestSuite) Enqueue(jobName string, params job.Parameters, isUnique bool, webHook string) (*job.Stats, error) {
    return suite.worker.Enqueue(jobName, params, isUnique, webHook)
}

func (suite *ControllerTestSuite) Schedule(jobName string, params job.Parameters, runAfterSeconds uint64, isUnique bool, webHook string) (*job.Stats, error) {
    return suite.worker.Schedule(jobName, params, runAfterSeconds, isUnique, webHook)
}

func (suite *ControllerTestSuite) PeriodicallyEnqueue(jobName string, params job.Parameters, cronSetting string, isUnique bool, webHook string) (*job.Stats, error) {
    return suite.worker.PeriodicallyEnqueue(jobName, params, cronSetting, isUnique, webHook)
}

func (suite *ControllerTestSuite) Stats() (*worker.Stats, error) {
    return suite.worker.Stats()
}

func (suite *ControllerTestSuite) IsKnownJob(name string) (interface{}, bool) {
    return suite.worker.IsKnownJob(name)
}

func (suite *ControllerTestSuite) ValidateJobParameters(jobType interface{}, params job.Parameters) error {
    return suite.worker.ValidateJobParameters(jobType, params)
}

func (suite *ControllerTestSuite) StopJob(jobID string) error {
    return suite.worker.StopJob(jobID)
}

func (suite *ControllerTestSuite) RetryJob(jobID string) error {
    return suite.worker.RetryJob(jobID)
}

func (suite *ControllerTestSuite) ScheduledJobs(query *query.Parameter) ([]*job.Stats, int64, error) {
    return suite.worker.ScheduledJobs(query)
}

// Implement fake objects with mock
type fakeLcmController struct {
    mock.Mock
}

func (flc *fakeLcmController) Serve() error {
    return flc.Called().Error(0)
}

func (flc *fakeLcmController) New(stats *job.Stats) (job.Tracker, error) {
    args := flc.Called(stats)
    if args.Error(1) != nil {
        return nil, args.Error(1)
    }

    return req
    return args.Get(0).(job.Tracker), nil
}

type fakePool struct{}

func (f *fakePool) Start() error {
    return nil
}

func (f *fakePool) RegisterJob(name string, job interface{}) error {
    return nil
}

func (f *fakePool) RegisterJobs(jobs map[string]interface{}) error {
    return nil
}

func (f *fakePool) Enqueue(jobName string, params models.Parameters, isUnique bool) (models.JobStats, error) {
    return models.JobStats{
        Stats: &models.JobStatData{
            JobID: "fake_ID",
        },
    }, nil
}

func (f *fakePool) Schedule(jobName string, params models.Parameters, runAfterSeconds uint64, isUnique bool) (models.JobStats, error) {
    return models.JobStats{
        Stats: &models.JobStatData{
            JobID: "fake_ID_Scheduled",
        },
    }, nil
}

func (f *fakePool) PeriodicallyEnqueue(jobName string, params models.Parameters, cronSetting string) (models.JobStats, error) {
    return models.JobStats{
        Stats: &models.JobStatData{
            JobID: "fake_ID_Periodic",
        },
    }, nil
}

func (f *fakePool) Stats() (models.JobPoolStats, error) {
    return models.JobPoolStats{
        Pools: []*models.JobPoolStatsData{
            {
                Status: "running",
            },
        },
    }, nil
}

func (f *fakePool) IsKnownJob(name string) (interface{}, bool) {
    return (*fakeJob)(nil), true
}

func (f *fakePool) ValidateJobParameters(jobType interface{}, params map[string]interface{}) error {
    return nil
}

func (f *fakePool) GetJobStats(jobID string) (models.JobStats, error) {
    return models.JobStats{
        Stats: &models.JobStatData{
            JobID:  "fake_ID",
            Status: "running",
        },
    }, nil
}

func (f *fakePool) StopJob(jobID string) error {
    return nil
}

func (f *fakePool) CancelJob(jobID string) error {
    return nil
}

func (f *fakePool) RetryJob(jobID string) error {
    return nil
}

func (f *fakePool) RegisterHook(jobID string, hookURL string) error {
    return nil
}

type fakeJob struct{}

func (j *fakeJob) MaxFails() uint {
    return 3
}

func (j *fakeJob) ShouldRetry() bool {
    return true
}

func (j *fakeJob) Validate(params map[string]interface{}) error {
    if p, ok := params["name"]; ok {
        if p == "testing" {
            return nil
        }
    }

func (flc *fakeLcmController) Track(jobID string) (job.Tracker, error) {
    args := flc.Called(jobID)
    if args.Error(1) != nil {
        return nil, args.Error(1)
    }

    return errors.New("testing error")
    return args.Get(0).(job.Tracker), nil
}

func (j *fakeJob) Run(ctx env.JobContext, params map[string]interface{}) error {
    return nil
type fakeWorker struct {
    mock.Mock
}

func (f *fakeWorker) Start() error {
    return f.Called().Error(0)
}

func (f *fakeWorker) RegisterJobs(jobs map[string]interface{}) error {
    return f.Called(jobs).Error(0)
}

func (f *fakeWorker) Enqueue(jobName string, params job.Parameters, isUnique bool, webHook string) (*job.Stats, error) {
    args := f.Called(jobName, params, isUnique, webHook)
    if args.Error(1) != nil {
        return nil, args.Error(1)
    }

    return args.Get(0).(*job.Stats), nil
}

func (f *fakeWorker) Schedule(jobName string, params job.Parameters, runAfterSeconds uint64, isUnique bool, webHook string) (*job.Stats, error) {
    args := f.Called(jobName, params, runAfterSeconds, isUnique, webHook)
    if args.Error(1) != nil {
        return nil, args.Error(1)
    }

    return args.Get(0).(*job.Stats), nil
}

func (f *fakeWorker) PeriodicallyEnqueue(jobName string, params job.Parameters, cronSetting string, isUnique bool, webHook string) (*job.Stats, error) {
    args := f.Called(jobName, params, cronSetting, isUnique, webHook)
    if args.Error(1) != nil {
        return nil, args.Error(1)
    }

    return args.Get(0).(*job.Stats), nil
}

func (f *fakeWorker) Stats() (*worker.Stats, error) {
    args := f.Called()
    if args.Error(1) != nil {
        return nil, args.Error(1)
    }

    return args.Get(0).(*worker.Stats), nil
}

func (f *fakeWorker) IsKnownJob(name string) (interface{}, bool) {
    args := f.Called(name)
    if !args.Bool(1) {
        return nil, args.Bool(1)
    }

    return args.Get(0), args.Bool(1)
}

func (f *fakeWorker) ValidateJobParameters(jobType interface{}, params job.Parameters) error {
    return f.Called(jobType, params).Error(0)
}

func (f *fakeWorker) StopJob(jobID string) error {
    return f.Called(jobID).Error(0)
}

func (f *fakeWorker) RetryJob(jobID string) error {
    return f.Called(jobID).Error(0)
}

func (f *fakeWorker) ScheduledJobs(query *query.Parameter) ([]*job.Stats, int64, error) {
    args := f.Called(query)
    if args.Error(2) != nil {
        return nil, 0, args.Error(2)
    }

    return args.Get(0).([]*job.Stats), int64(args.Int(1)), nil
}

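For readers unfamiliar with the pattern used by fakeLcmController and fakeWorker above, the sketch below isolates the minimal testify mock wiring. It is not part of the commit; the fakeSvc type and its Echo method are invented for illustration.

    package main

    import (
        "fmt"

        "github.com/stretchr/testify/mock"
    )

    // fakeSvc mirrors the fakes above: embed mock.Mock, then route each
    // method through Called() to fetch the programmed results.
    type fakeSvc struct {
        mock.Mock
    }

    func (f *fakeSvc) Echo(msg string) (string, error) {
        args := f.Called(msg) // records the call and returns canned values
        return args.String(0), args.Error(1)
    }

    func main() {
        f := &fakeSvc{}
        // Program the expectation the same way the suite does with
        // suite.worker.On("Stats").Return(...) above.
        f.On("Echo", "ping").Return("pong", nil)

        out, err := f.Echo("ping")
        fmt.Println(out, err) // pong <nil>
    }
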
@@ -16,28 +16,30 @@

package core

import (
    "github.com/goharbor/harbor/src/jobservice/models"
    "github.com/goharbor/harbor/src/jobservice/common/query"
    "github.com/goharbor/harbor/src/jobservice/job"
    "github.com/goharbor/harbor/src/jobservice/worker"
)

// Interface defines the related main methods of job operation.
type Interface interface {
    // LaunchJob is used to handle the job submission request.
    //
    // req JobRequest : Job request contains related required information of queuing job.
    // req *job.Request : Job request contains related required information of queuing job.
    //
    // Returns:
    //  JobStats: Job status info with ID and self link returned if job is successfully launched.
    //  error   : Error returned if failed to launch the specified job.
    LaunchJob(req models.JobRequest) (models.JobStats, error)
    //  job.Stats : Job status info with ID and self link returned if job is successfully launched.
    //  error     : Error returned if failed to launch the specified job.
    LaunchJob(req *job.Request) (*job.Stats, error)

    // GetJob is used to handle the job stats query request.
    //
    // jobID string: ID of job.
    //
    // Returns:
    //  JobStats: Job status info if job exists.
    //  error   : Error returned if failed to get the specified job.
    GetJob(jobID string) (models.JobStats, error)
    //  *job.Stats : Job status info if job exists.
    //  error      : Error returned if failed to get the specified job.
    GetJob(jobID string) (*job.Stats, error)

    // StopJob is used to handle the job stopping request.
    //
@@ -55,17 +57,19 @@ type Interface interface {
    //  error : Error returned if failed to retry the specified job.
    RetryJob(jobID string) error

    // Cancel the job
    //
    // jobID string : ID of the enqueued job
    //
    // Returns:
    //  error : error returned if meet any problems
    CancelJob(jobID string) error

    // CheckStatus is used to handle the job service healthy status checking request.
    CheckStatus() (models.JobPoolStats, error)
    CheckStatus() (*worker.Stats, error)

    // GetJobLogData is used to return the log text data for the specified job if it exists
    GetJobLogData(jobID string) ([]byte, error)

    // Get the periodic executions for the specified periodic job.
    // Pagination by query is supported.
    // The total number is also returned.
    GetPeriodicExecutions(periodicJobID string, query *query.Parameter) ([]*job.Stats, int64, error)

    // Get the scheduled jobs by page.
    // The page size in the query will be ignored; the default of 20 is used (a limitation of the backend lib).
    // The total number is also returned.
    ScheduledJobs(query *query.Parameter) ([]*job.Stats, int64, error)
}

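A hedged sketch of how a caller drives the refactored Interface; it is not part of the commit. The controller value ctl is assumed to implement core.Interface, and the request mirrors the new createJobReq helper shown earlier.

    // launchSample is illustrative only; errors are returned to the caller.
    func launchSample(ctl Interface) (string, error) {
        req := &job.Request{
            Job: &job.RequestBody{
                Name:       job.SampleJob,
                Parameters: job.Parameters{"name": "testing:v1"},
                Metadata: &job.Metadata{
                    JobKind:  job.KindGeneric,
                    IsUnique: true,
                },
            },
        }

        stats, err := ctl.LaunchJob(req)
        if err != nil {
            return "", err
        }

        // The returned stats carry the generated job ID for later queries.
        return stats.Info.JobID, nil
    }
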
src/jobservice/env/context.go (vendored, 5 changes)

@@ -16,6 +16,7 @@ package env

import (
    "context"
    "github.com/goharbor/harbor/src/jobservice/job"
    "sync"
)

@@ -33,6 +34,6 @@ type Context struct {
    ErrorChan chan error

    // The base job context reference
    // It will be the parent conetext of job execution context
    JobContext JobContext
    // It will be the parent context of job execution context
    JobContext job.Context
}

@@ -21,26 +21,20 @@ import (
)

const (
    // JobStoppedErrorCode is code for jobStoppedError
    JobStoppedErrorCode = 10000 + iota
    // JobCancelledErrorCode is code for jobCancelledError
    JobCancelledErrorCode
    // ReadRequestBodyErrorCode is code for the error of reading the http request body
    ReadRequestBodyErrorCode
    ReadRequestBodyErrorCode = 10000 + iota
    // HandleJSONDataErrorCode is code for the error of handling json data
    HandleJSONDataErrorCode
    // MissingBackendHandlerErrorCode is code for the error of missing backend controller
    MissingBackendHandlerErrorCode
    // LaunchJobErrorCode is code for the error of launching job
    LaunchJobErrorCode
    // CheckStatsErrorCode is code for the error of checking stats of worker pool
    // CheckStatsErrorCode is code for the error of checking stats of the worker
    CheckStatsErrorCode
    // GetJobStatsErrorCode is code for the error of getting stats of enqueued job
    GetJobStatsErrorCode
    // StopJobErrorCode is code for the error of stopping job
    StopJobErrorCode
    // CancelJobErrorCode is code for the error of cancelling job
    CancelJobErrorCode
    // RetryJobErrorCode is code for the error of retrying job
    RetryJobErrorCode
    // UnknownActionNameErrorCode is code for the case of unknown action name
@@ -53,6 +47,14 @@ const (
    UnAuthorizedErrorCode
    // ResourceConflictsErrorCode is code for the error of resource conflicting
    ResourceConflictsErrorCode
    // BadRequestErrorCode is code for the error of bad request
    BadRequestErrorCode
    // GetScheduledJobsErrorCode is code for the error of getting scheduled jobs
    GetScheduledJobsErrorCode
    // GetPeriodicExecutionErrorCode is code for the error of getting periodic executions
    GetPeriodicExecutionErrorCode
    // StatusMismatchErrorCode is code for the error of mismatching status
    StatusMismatchErrorCode
)

// baseError ...

@@ -82,92 +84,67 @@ func New(code uint16, err string, description string) error {

// ReadRequestBodyError is error wrapper for the error of reading request body.
func ReadRequestBodyError(err error) error {
    return New(ReadRequestBodyErrorCode, "Read request body failed with error", err.Error())
    return New(ReadRequestBodyErrorCode, "read request body failed with error", err.Error())
}

// HandleJSONDataError is error wrapper for the error of handling json data.
func HandleJSONDataError(err error) error {
    return New(HandleJSONDataErrorCode, "Handle json data failed with error", err.Error())
    return New(HandleJSONDataErrorCode, "handle json data failed with error", err.Error())
}

// MissingBackendHandlerError is error wrapper for the error of missing backend controller.
func MissingBackendHandlerError(err error) error {
    return New(MissingBackendHandlerErrorCode, "Missing backend controller to handle the requests", err.Error())
    return New(MissingBackendHandlerErrorCode, "missing backend controller to handle the requests", err.Error())
}

// LaunchJobError is error wrapper for the error of launching job failed.
func LaunchJobError(err error) error {
    return New(LaunchJobErrorCode, "Launch job failed with error", err.Error())
    return New(LaunchJobErrorCode, "launch job failed with error", err.Error())
}

// CheckStatsError is error wrapper for the error of checking stats failed
func CheckStatsError(err error) error {
    return New(CheckStatsErrorCode, "Check stats of server failed with error", err.Error())
    return New(CheckStatsErrorCode, "check stats of server failed with error", err.Error())
}

// GetJobStatsError is error wrapper for the error of getting job stats
func GetJobStatsError(err error) error {
    return New(GetJobStatsErrorCode, "Get job stats failed with error", err.Error())
    return New(GetJobStatsErrorCode, "get job stats failed with error", err.Error())
}

// StopJobError is error for the case of stopping job failed
func StopJobError(err error) error {
    return New(StopJobErrorCode, "Stop job failed with error", err.Error())
}

// CancelJobError is error for the case of cancelling job failed
func CancelJobError(err error) error {
    return New(CancelJobErrorCode, "Cancel job failed with error", err.Error())
    return New(StopJobErrorCode, "stop job failed with error", err.Error())
}

// RetryJobError is error for the case of retrying job failed
func RetryJobError(err error) error {
    return New(RetryJobErrorCode, "Retry job failed with error", err.Error())
    return New(RetryJobErrorCode, "retry job failed with error", err.Error())
}

// UnknownActionNameError is error for the case of getting unknown job action
func UnknownActionNameError(err error) error {
    return New(UnknownActionNameErrorCode, "Unknown job action name", err.Error())
    return New(UnknownActionNameErrorCode, "unknown job action name", err.Error())
}

// GetJobLogError is error for the case of getting job log failed
func GetJobLogError(err error) error {
    return New(GetJobLogErrorCode, "Failed to get the job log", err.Error())
    return New(GetJobLogErrorCode, "failed to get the job log", err.Error())
}

// UnauthorizedError is error for the case of unauthorized accessing
func UnauthorizedError(err error) error {
    return New(UnAuthorizedErrorCode, "Unauthorized", err.Error())
    return New(UnAuthorizedErrorCode, "unauthorized", err.Error())
}

// jobStoppedError is designed for the case of stopping job.
type jobStoppedError struct {
    baseError
// GetScheduledJobsError is error for the case of getting scheduled jobs failed
func GetScheduledJobsError(err error) error {
    return New(GetScheduledJobsErrorCode, "failed to get scheduled jobs", err.Error())
}

// JobStoppedError is error wrapper for the case of stopping job.
func JobStoppedError() error {
    return jobStoppedError{
        baseError{
            Code: JobStoppedErrorCode,
            Err:  "Job is stopped",
        },
    }
}

// jobCancelledError is designed for the case of cancelling job.
type jobCancelledError struct {
    baseError
}

// JobCancelledError is error wrapper for the case of cancelling job.
func JobCancelledError() error {
    return jobCancelledError{
        baseError{
            Code: JobStoppedErrorCode,
            Err:  "Job is cancelled",
        },
    }
// GetPeriodicExecutionError is error for the case of getting periodic jobs failed
func GetPeriodicExecutionError(err error) error {
    return New(GetPeriodicExecutionErrorCode, "failed to get periodic executions", err.Error())
}

// objectNotFound is designed for the case of no object found
@@ -202,26 +179,70 @@ func ConflictError(object string) error {
    }
}

// IsJobStoppedError return true if the error is jobStoppedError
func IsJobStoppedError(err error) bool {
    _, ok := err.(jobStoppedError)
    return ok
// badRequestError is designed for the case of bad request
type badRequestError struct {
    baseError
}

// IsJobCancelledError return true if the error is jobCancelledError
func IsJobCancelledError(err error) bool {
    _, ok := err.(jobCancelledError)
    return ok
// BadRequestError returns the error of handling the bad request case
func BadRequestError(object interface{}) error {
    return badRequestError{
        baseError{
            Code:        BadRequestErrorCode,
            Err:         "bad request",
            Description: fmt.Sprintf("%s", object),
        },
    }
}

// statusMismatchError is designed for the case of job status update mismatching
type statusMismatchError struct {
    baseError
}

// StatusMismatchError returns the error of job status mismatching
func StatusMismatchError(current, target string) error {
    return statusMismatchError{
        baseError{
            Code:        StatusMismatchErrorCode,
            Err:         "mismatch job status",
            Description: fmt.Sprintf("current %s, setting to %s", current, target),
        },
    }
}

// IsObjectNotFoundError return true if the error is objectNotFoundError
func IsObjectNotFoundError(err error) bool {
    if err == nil {
        return false
    }
    _, ok := err.(objectNotFoundError)
    return ok
}

// IsConflictError returns true if the error is conflictError
func IsConflictError(err error) bool {
    if err == nil {
        return false
    }
    _, ok := err.(conflictError)
    return ok
}

// IsBadRequestError returns true if the error is badRequestError
func IsBadRequestError(err error) bool {
    if err == nil {
        return false
    }
    _, ok := err.(badRequestError)
    return ok
}

// IsStatusMismatchError returns true if the error is statusMismatchError
func IsStatusMismatchError(err error) bool {
    if err == nil {
        return false
    }
    _, ok := err.(statusMismatchError)
    return ok
}

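The typed checkers above exist so that callers can map controller errors onto HTTP responses without matching on error strings. A hedged sketch of that mapping follows; the handler shape and the chosen status codes are assumptions, not part of the commit (the errors package is assumed to be imported as errs).

    // writeError is illustrative only.
    func writeError(w http.ResponseWriter, err error) {
        code := http.StatusInternalServerError
        switch {
        case errs.IsObjectNotFoundError(err):
            code = http.StatusNotFound
        case errs.IsBadRequestError(err):
            code = http.StatusBadRequest
        case errs.IsConflictError(err), errs.IsStatusMismatchError(err):
            code = http.StatusConflict
        }
        w.WriteHeader(code)
        _, _ = w.Write([]byte(err.Error()))
    }
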
src/jobservice/hook/hook_agent.go (new file, 343 lines)

@@ -0,0 +1,343 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package hook

import (
    "context"
    "encoding/json"
    "math/rand"
    "net/url"
    "time"

    "github.com/pkg/errors"

    "github.com/goharbor/harbor/src/jobservice/job"

    "sync"

    "github.com/goharbor/harbor/src/jobservice/common/rds"
    "github.com/goharbor/harbor/src/jobservice/env"
    "github.com/goharbor/harbor/src/jobservice/lcm"
    "github.com/goharbor/harbor/src/jobservice/logger"
    "github.com/gomodule/redigo/redis"
)

const (
    // Influenced by the worker number setting
    maxEventChanBuffer = 1024
    // Max concurrent client handlers
    maxHandlers = 5
    // The max time for expiring the retrying events
    // 180 days
    maxEventExpireTime = 3600 * 24 * 180
    // Wait a short while if any errors occurred
    shortLoopInterval = 5 * time.Second
    // Wait a longer while if no retrying elements are found
    longLoopInterval = 5 * time.Minute
)

// Agent is designed to handle the hook events with a reasonable number of concurrent threads
type Agent interface {
    // Trigger hooks
    Trigger(evt *Event) error
    // Serves events now
    Serve() error
    // Attach a job life cycle controller
    Attach(ctl lcm.Controller)
}

// Event contains the hook URL and the data
type Event struct {
    URL       string            `json:"url"`
    Message   string            `json:"message"`   // meaningful text for event
    Data      *job.StatusChange `json:"data"`      // generic data
    Timestamp int64             `json:"timestamp"` // used as the time threshold for discarding the event (unit: second)
}

// Validate event
func (e *Event) Validate() error {
    _, err := url.Parse(e.URL)
    if err != nil {
        return err
    }

    if e.Data == nil {
        return errors.New("nil hook data")
    }

    return nil
}

// Serialize event to bytes
func (e *Event) Serialize() ([]byte, error) {
    return json.Marshal(e)
}

// Deserialize the bytes to event
func (e *Event) Deserialize(bytes []byte) error {
    return json.Unmarshal(bytes, e)
}

// Basic agent for usage
type basicAgent struct {
    context   context.Context
    namespace string
    client    Client
    ctl       lcm.Controller
    events    chan *Event
    tokens    chan bool
    redisPool *redis.Pool
    wg        *sync.WaitGroup
}

// NewAgent is the constructor of the basic agent
func NewAgent(ctx *env.Context, ns string, redisPool *redis.Pool) Agent {
    tks := make(chan bool, maxHandlers)
    // Put tokens
    for i := 0; i < maxHandlers; i++ {
        tks <- true
    }
    return &basicAgent{
        context:   ctx.SystemContext,
        namespace: ns,
        client:    NewClient(ctx.SystemContext),
        events:    make(chan *Event, maxEventChanBuffer),
        tokens:    tks,
        redisPool: redisPool,
        wg:        ctx.WG,
    }
}

// Attach a job life cycle controller
func (ba *basicAgent) Attach(ctl lcm.Controller) {
    ba.ctl = ctl
}

// Trigger implements the same method of interface @Agent
func (ba *basicAgent) Trigger(evt *Event) error {
    if evt == nil {
        return errors.New("nil event")
    }

    if err := evt.Validate(); err != nil {
        return err
    }

    ba.events <- evt

    return nil
}

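A minimal sketch (not in the diff) exercising the Event JSON helpers defined above; all field values are invented.

    func eventRoundTrip() error {
        evt := &Event{
            URL:       "http://localhost:9090",
            Message:   "status of job fake_job changed to running",
            Data:      &job.StatusChange{JobID: "fake_job", Status: "running"},
            Timestamp: time.Now().Unix(),
        }

        raw, err := evt.Serialize() // JSON bytes, suitable for the redis retry queue
        if err != nil {
            return err
        }

        clone := &Event{}
        return clone.Deserialize(raw) // restores URL, message, data and timestamp
    }
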
// Serve starts the basic agent.
// Termination depends on the system context.
// Blocking call.
func (ba *basicAgent) Serve() error {
    if ba.ctl == nil {
        return errors.New("nil life cycle controller of hook agent")
    }

    ba.wg.Add(1)
    go ba.loopRetry()
    logger.Info("Hook event retrying loop is started")

    ba.wg.Add(1)
    go ba.serve()
    logger.Info("Basic hook agent is started")

    return nil
}

func (ba *basicAgent) serve() {
    defer func() {
        logger.Info("Basic hook agent is stopped")
        ba.wg.Done()
    }()

    for {
        select {
        case evt := <-ba.events:
            // If the concurrency limit is exceeded, wait here to avoid
            // opening too many request connections at the same time.
            <-ba.tokens
            go func(evt *Event) {
                defer func() {
                    ba.tokens <- true // return token
                }()

                if err := ba.client.SendEvent(evt); err != nil {
                    logger.Errorf("Send hook event '%s' to '%s' failed with error: %s; push to the queue for retrying later", evt.Message, evt.URL, err)
                    // Push event to the retry queue
                    if err := ba.pushForRetry(evt); err != nil {
                        // Failed to push to the retry queue, so directly push it
                        // back to the event channel of this node with a reasonable backoff time.
                        logger.Errorf("Failed to push hook event to the retry queue: %s", err)

                        // Put it back onto the event chan after waiting for a reasonable while;
                        // the waiting is important as it avoids sending a large number of
                        // failure-expecting requests in a short window.
                        // As 'pushForRetry' has already checked the timestamp, an expired
                        // event is directly discarded with a nil error returned, so there
                        // is no need to check it again here.
                        <-time.After(time.Duration(rand.Int31n(55)+5) * time.Second)
                        ba.events <- evt
                    }
                }
            }(evt)

        case <-ba.context.Done():
            return
        }
    }
}

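The serve loop above caps concurrency with a token channel rather than a fixed worker pool. The stand-alone sketch below isolates that pattern; the numbers and the simulated work are arbitrary and not from the commit.

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    func main() {
        const maxConcurrent = 5
        tokens := make(chan bool, maxConcurrent)
        for i := 0; i < maxConcurrent; i++ {
            tokens <- true // seed the token pool
        }

        var wg sync.WaitGroup
        for i := 0; i < 20; i++ {
            <-tokens // blocks when 5 handlers are already busy
            wg.Add(1)
            go func(n int) {
                defer func() {
                    tokens <- true // hand the token back
                    wg.Done()
                }()
                time.Sleep(10 * time.Millisecond) // simulated hook send
                fmt.Println("handled event", n)
            }(i)
        }
        wg.Wait()
    }
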
func (ba *basicAgent) pushForRetry(evt *Event) error {
    if evt == nil {
        // do nothing
        return nil
    }

    // Anyway we'll need the raw JSON, let's try to serialize it here
    rawJSON, err := evt.Serialize()
    if err != nil {
        return err
    }

    now := time.Now().Unix()
    if evt.Timestamp > 0 && now-evt.Timestamp >= maxEventExpireTime {
        // Expired, do not need to push back to the retry queue
        logger.Warningf("Event is expired: %s", rawJSON)

        return nil
    }

    conn := ba.redisPool.Get()
    defer func() {
        _ = conn.Close()
    }()

    key := rds.KeyHookEventRetryQueue(ba.namespace)
    args := make([]interface{}, 0)

    // Use nano time to get a more accurate timestamp
    score := time.Now().UnixNano()
    args = append(args, key, "NX", score, rawJSON)

    _, err = conn.Do("ZADD", args...)
    if err != nil {
        return err
    }

    return nil
}

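pushForRetry persists failed events in a redis sorted set scored by nanosecond timestamps, so the oldest failure is retried first, and ZADD with NX keeps already-queued members from being overwritten. A reduced sketch of that primitive, assuming conn is a live redigo connection; the key name here is made up (the real one comes from rds.KeyHookEventRetryQueue):

    // queueForRetry is illustrative only.
    func queueForRetry(conn redis.Conn, payload []byte) error {
        score := time.Now().UnixNano()
        // NX: only add the member if it is not queued yet
        _, err := conn.Do("ZADD", "demo:hook_retry_queue", "NX", score, payload)
        return err
    }
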
func (ba *basicAgent) loopRetry() {
    defer func() {
        logger.Info("Hook event retrying loop exit")
        ba.wg.Done()
    }()

    token := make(chan bool, 1)
    token <- true

    for {
        <-token
        if err := ba.reSend(); err != nil {
            waitInterval := shortLoopInterval
            if err == rds.ErrNoElements {
                // No elements
                waitInterval = longLoopInterval
            } else {
                logger.Errorf("Resend hook event error: %s", err.Error())
            }

            select {
            case <-time.After(waitInterval):
                // Just wait, do nothing
            case <-ba.context.Done():
                // Terminated
                return
            }
        }

        // Put token back
        token <- true
    }
}

func (ba *basicAgent) reSend() error {
    evt, err := ba.popMinOne()
    if err != nil {
        return err
    }

    jobID, status, err := extractJobID(evt.Data)
    if err != nil {
        return err
    }

    t, err := ba.ctl.Track(jobID)
    if err != nil {
        return err
    }

    diff := status.Compare(job.Status(t.Job().Info.Status))
    if diff > 0 ||
        (diff == 0 && t.Job().Info.CheckIn != evt.Data.CheckIn) {
        ba.events <- evt
        return nil
    }

    return errors.Errorf("outdated hook event: %s, latest job status: %s", evt.Message, t.Job().Info.Status)
}

func (ba *basicAgent) popMinOne() (*Event, error) {
    conn := ba.redisPool.Get()
    defer func() {
        _ = conn.Close()
    }()

    key := rds.KeyHookEventRetryQueue(ba.namespace)
    minOne, err := rds.ZPopMin(conn, key)
    if err != nil {
        return nil, err
    }

    rawEvent, ok := minOne.([]byte)
    if !ok {
        return nil, errors.New("bad request: non bytes slice for raw event")
    }

    evt := &Event{}
    if err := evt.Deserialize(rawEvent); err != nil {
        return nil, err
    }

    return evt, nil
}

// Extract the job ID and status from the event data field.
// The first return is the job ID,
// the second is the job status,
// and the last one is the error.
func extractJobID(data *job.StatusChange) (string, job.Status, error) {
    if data != nil && len(data.JobID) > 0 {
        status := job.Status(data.Status)
        if status.Validate() == nil {
            return data.JobID, status, nil
        }
    }

    return "", "", errors.New("malformed job status change data")
}

src/jobservice/hook/hook_agent_test.go (new file, 202 lines)

@@ -0,0 +1,202 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package hook

import (
    "context"
    "fmt"
    "net/http"
    "net/http/httptest"
    "sync/atomic"
    "testing"
    "time"

    "github.com/goharbor/harbor/src/jobservice/common/rds"
    "github.com/goharbor/harbor/src/jobservice/env"
    "github.com/goharbor/harbor/src/jobservice/job"
    "github.com/goharbor/harbor/src/jobservice/lcm"
    "github.com/goharbor/harbor/src/jobservice/tests"
    "github.com/gomodule/redigo/redis"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "github.com/stretchr/testify/suite"
    "sync"
)

// HookAgentTestSuite tests functions of hook agent
type HookAgentTestSuite struct {
    suite.Suite

    pool      *redis.Pool
    namespace string
    lcmCtl    lcm.Controller

    envContext *env.Context
    cancel     context.CancelFunc
}

// TestHookAgentTestSuite is entry of go test
func TestHookAgentTestSuite(t *testing.T) {
    suite.Run(t, new(HookAgentTestSuite))
}

// SetupSuite prepares the test suite
func (suite *HookAgentTestSuite) SetupSuite() {
    suite.pool = tests.GiveMeRedisPool()
    suite.namespace = tests.GiveMeTestNamespace()

    ctx, cancel := context.WithCancel(context.Background())
    suite.envContext = &env.Context{
        SystemContext: ctx,
        WG:            new(sync.WaitGroup),
    }
    suite.cancel = cancel

    suite.lcmCtl = lcm.NewController(suite.envContext, suite.namespace, suite.pool, func(hookURL string, change *job.StatusChange) error { return nil })
}

// TearDownSuite clears the test suite
func (suite *HookAgentTestSuite) TearDownSuite() {
    conn := suite.pool.Get()
    defer func() {
        _ = conn.Close()
    }()

    _ = tests.ClearAll(suite.namespace, conn)
}

// TestEventSending ...
func (suite *HookAgentTestSuite) TestEventSending() {
    done := make(chan bool, 1)

    expected := uint32(1300) // > the 1024 max channel buffer
    count := uint32(0)
    counter := &count

    ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        defer func() {
            c := atomic.AddUint32(counter, 1)
            if c == expected {
                done <- true
            }
        }()
        _, _ = fmt.Fprintln(w, "ok")
    }))
    defer ts.Close()

    // In case the test fails, avoid a deadlock
    go func() {
        <-time.After(time.Duration(10) * time.Second)
        done <- true // time out
    }()

    agent := NewAgent(suite.envContext, suite.namespace, suite.pool)
    agent.Attach(suite.lcmCtl)
    err := agent.Serve()
    require.NoError(suite.T(), err, "agent serve: nil error expected but got %s", err)

    go func() {
        defer func() {
            suite.cancel()
        }()

        for i := uint32(0); i < expected; i++ {
            changeData := &job.StatusChange{
                JobID:  fmt.Sprintf("job-%d", i),
                Status: "running",
            }

            evt := &Event{
                URL:       ts.URL,
                Message:   fmt.Sprintf("status of job %s change to %s", changeData.JobID, changeData.Status),
                Data:      changeData,
                Timestamp: time.Now().Unix(),
            }

            err := agent.Trigger(evt)
            require.Nil(suite.T(), err, "agent trigger: nil error expected but got %s", err)
        }

        // Check results
        <-done
        require.Equal(suite.T(), expected, count, "expected %d hook events but only got %d", expected, count)
    }()

    // Wait
    suite.envContext.WG.Wait()
}

// TestRetryAndPopMin ...
func (suite *HookAgentTestSuite) TestRetryAndPopMin() {
    ctx := context.Background()

    tks := make(chan bool, maxHandlers)
    // Put tokens
    for i := 0; i < maxHandlers; i++ {
        tks <- true
    }

    agent := &basicAgent{
        context:   ctx,
        namespace: suite.namespace,
        client:    NewClient(ctx),
        events:    make(chan *Event, maxEventChanBuffer),
        tokens:    tks,
        redisPool: suite.pool,
    }
    agent.Attach(suite.lcmCtl)

    changeData := &job.StatusChange{
        JobID:  "fake_job_ID",
        Status: job.RunningStatus.String(),
    }

    evt := &Event{
        URL:       "https://fake.js.com",
        Message:   fmt.Sprintf("status of job %s change to %s", changeData.JobID, changeData.Status),
        Data:      changeData,
        Timestamp: time.Now().Unix(),
    }

    // Mock job stats
    conn := suite.pool.Get()
    defer func() {
        _ = conn.Close()
    }()

    key := rds.KeyJobStats(suite.namespace, "fake_job_ID")
    _, err := conn.Do("HSET", key, "status", job.SuccessStatus.String())
    require.Nil(suite.T(), err, "prepare job stats: nil error returned but got %s", err)

    err = agent.pushForRetry(evt)
    require.Nil(suite.T(), err, "push for retry: nil error expected but got %s", err)

    err = agent.reSend()
    require.Error(suite.T(), err, "resend: non nil error expected but got nil")
    assert.Equal(suite.T(), 0, len(agent.events), "the hook event should be discarded but actually was not")

    // Change status
    _, err = conn.Do("HSET", key, "status", job.PendingStatus.String())
    require.Nil(suite.T(), err, "prepare job stats: nil error returned but got %s", err)

    err = agent.pushForRetry(evt)
    require.Nil(suite.T(), err, "push for retry: nil error expected but got %s", err)
    err = agent.reSend()
    require.Nil(suite.T(), err, "resend: nil error should be returned but got %s", err)

    <-time.After(time.Duration(1) * time.Second)

    assert.Equal(suite.T(), 1, len(agent.events), "the hook event should be requeued but actually was not: %d", len(agent.events))
}

src/jobservice/hook/hook_client.go (new file, 136 lines)

@@ -0,0 +1,136 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package hook

import (
    "encoding/json"
    "errors"
    "fmt"
    "io/ioutil"
    "net"
    "net/http"
    "net/url"
    "os"
    "strings"
    "time"

    "context"
    "github.com/goharbor/harbor/src/jobservice/common/utils"
)

const (
    proxyEnvHTTP  = "http_proxy"
    proxyEnvHTTPS = "https_proxy"
)

// Client for handling the hook events
type Client interface {
    // SendEvent sends the event to the subscribed parties
    SendEvent(evt *Event) error
}

// basicClient is used to post the related data to the interested parties.
type basicClient struct {
    client *http.Client
    ctx    context.Context
}

// NewClient returns a pointer to the new hook client
func NewClient(ctx context.Context) Client {
    // Create transport
    transport := &http.Transport{
        MaxIdleConns:    20,
        IdleConnTimeout: 30 * time.Second,
        DialContext: (&net.Dialer{
            Timeout:   30 * time.Second,
            KeepAlive: 30 * time.Second,
        }).DialContext,
        TLSHandshakeTimeout:   10 * time.Second,
        ResponseHeaderTimeout: 10 * time.Second,
        ExpectContinueTimeout: 1 * time.Second,
    }

    // Get the http/https proxies
    proxyAddr, ok := os.LookupEnv(proxyEnvHTTP)
    if !ok {
        proxyAddr, ok = os.LookupEnv(proxyEnvHTTPS)
    }

    if ok && !utils.IsEmptyStr(proxyAddr) {
        proxyURL, err := url.Parse(proxyAddr)
        if err == nil {
            transport.Proxy = http.ProxyURL(proxyURL)
        }
    }

    client := &http.Client{
        Timeout:   15 * time.Second,
        Transport: transport,
    }

    return &basicClient{
        client: client,
        ctx:    ctx,
    }
}

// SendEvent reports the status change info to the subscribed party.
// The status includes 'checkin' info with the format 'check_in:<message>'.
func (bc *basicClient) SendEvent(evt *Event) error {
    if evt == nil {
        return errors.New("nil event")
    }

    if err := evt.Validate(); err != nil {
        return err
    }

    // Marshal data
    data, err := json.Marshal(evt.Data)
    if err != nil {
        return err
    }

    // New post request
    req, err := http.NewRequest(http.MethodPost, evt.URL, strings.NewReader(string(data)))
    if err != nil {
        return err
    }

    res, err := bc.client.Do(req)
    if err != nil {
        return err
    }

    defer func() {
        _ = res.Body.Close()
    }() // close connection for reuse

    // Should be 200
    if res.StatusCode != http.StatusOK {
        if res.ContentLength > 0 {
            // Read the error content and return it
            dt, err := ioutil.ReadAll(res.Body)
            if err != nil {
                return err
            }
            return errors.New(string(dt))
        }

        return fmt.Errorf("failed to report status change via hook, expect '200' but got '%d'", res.StatusCode)
    }

    return nil
}

src/jobservice/hook/hook_client_test.go (new file, 105 lines)

@@ -0,0 +1,105 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package hook

import (
    "context"
    "encoding/json"
    "fmt"
    "github.com/goharbor/harbor/src/jobservice/job"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/suite"
    "io/ioutil"
    "net/http"
    "net/http/httptest"
    "testing"
    "time"
)

// HookClientTestSuite tests functions of hook client
type HookClientTestSuite struct {
    suite.Suite

    mockServer *httptest.Server
    client     Client
}

// SetupSuite prepares the test suite
func (suite *HookClientTestSuite) SetupSuite() {
    suite.client = NewClient(context.Background())
    suite.mockServer = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        bytes, err := ioutil.ReadAll(r.Body)
        if err != nil {
            w.WriteHeader(http.StatusInternalServerError)
            return
        }

        change := &job.StatusChange{}
        err = json.Unmarshal(bytes, change)
        if err != nil {
            w.WriteHeader(http.StatusInternalServerError)
            return
        }

        if change.JobID == "job_ID_failed" {
            w.WriteHeader(http.StatusInternalServerError)
            return
        }

        fmt.Fprintln(w, "ok")
    }))
}

// TearDownSuite clears the test suite
func (suite *HookClientTestSuite) TearDownSuite() {
    suite.mockServer.Close()
}

// TestHookClientTestSuite is entry of go test
func TestHookClientTestSuite(t *testing.T) {
    suite.Run(t, new(HookClientTestSuite))
}

// TestHookClient ...
func (suite *HookClientTestSuite) TestHookClient() {
    changeData := &job.StatusChange{
        JobID:  "fake_job_ID",
        Status: "running",
    }
    evt := &Event{
        URL:       suite.mockServer.URL,
        Data:      changeData,
        Message:   fmt.Sprintf("Status of job %s changed to: %s", changeData.JobID, changeData.Status),
        Timestamp: time.Now().Unix(),
    }
    err := suite.client.SendEvent(evt)
    assert.Nil(suite.T(), err, "send event: nil error expected but got %s", err)
}

// TestReportStatusFailed ...
func (suite *HookClientTestSuite) TestReportStatusFailed() {
    changeData := &job.StatusChange{
        JobID:  "job_ID_failed",
        Status: "running",
    }
    evt := &Event{
        URL:       suite.mockServer.URL,
        Data:      changeData,
        Message:   fmt.Sprintf("Status of job %s changed to: %s", changeData.JobID, changeData.Status),
        Timestamp: time.Now().Unix(),
    }

    err := suite.client.SendEvent(evt)
    assert.NotNil(suite.T(), err, "send event: expected non nil error but got nil")
}

@@ -12,27 +12,26 @@
// See the License for the specific language governing permissions and
// limitations under the License.

package env
package job

import (
    "context"

    "github.com/goharbor/harbor/src/jobservice/logger"
    "github.com/goharbor/harbor/src/jobservice/models"
)

// JobContext is combination of BaseContext and other job specified resources.
// JobContext will be the real execution context for one job.
type JobContext interface {
// Context is combination of BaseContext and other job specified resources.
// Context will be the real execution context for one job.
type Context interface {
    // Build the context based on the parent context
    //
    // dep JobData : Dependencies for building the context, just in case that the build
    // function need some external info
    // A new job context will be generated based on the current context
    // for the provided job.
    //
    // Returns:
    //  new JobContext based on the parent one
    //  new Context based on the parent one
    //  error if meet any problems
    Build(dep JobData) (JobContext, error)
    Build(tracker Tracker) (Context, error)

    // Get property from the context
    //
@@ -57,27 +56,19 @@ type JobContext interface {
    //  error if meet any problems
    Checkin(status string) error

    // OPCommand return the control operational command like stop/cancel if have
    // OPCommand return the control operational command like stop if have
    //
    // Returns:
    //  op command if have
    //  flag to indicate if have command
    OPCommand() (string, bool)
    OPCommand() (OPCommand, bool)

    // Return the logger
    GetLogger() logger.Interface

    // Launch sub jobs
    LaunchJob(req models.JobRequest) (models.JobStats, error)
    // Get tracker
    Tracker() Tracker
}

// JobData defines job context dependencies.
type JobData struct {
    ID        string
    Name      string
    Args      map[string]interface{}
    ExtraData map[string]interface{}
}

// JobContextInitializer is a func to initialize the concrete job context
type JobContextInitializer func(ctx *Context) (JobContext, error)
// ContextInitializer is a func to initialize the concrete job context
type ContextInitializer func(ctx context.Context) (Context, error)

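A hedged sketch (not in the commit) of how a job implementation might consume the reworked Context: poll OPCommand for a stop signal, log through the attached logger, and report progress via Checkin. The DemoJob type, the loop body, and the exact Run signature are illustrative assumptions.

    func (dj *DemoJob) Run(ctx Context, params Parameters) error {
        log := ctx.GetLogger()

        for step := 0; step < 10; step++ {
            // Cooperative stop: the tracker-backed context surfaces the command.
            if cmd, ok := ctx.OPCommand(); ok && cmd == StopCommand {
                log.Info("stop command received, exiting")
                return nil
            }

            // Report fine-grained progress back through the tracker.
            if err := ctx.Checkin(fmt.Sprintf("step %d done", step)); err != nil {
                return err
            }
        }

        return nil
    }
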
@@ -16,22 +16,17 @@ package impl

import (
    "context"
    "errors"
    "fmt"
    "math"
    "reflect"
    "time"

    "github.com/goharbor/harbor/src/common"
    "errors"
    comcfg "github.com/goharbor/harbor/src/common/config"
    "github.com/goharbor/harbor/src/common/dao"
    "github.com/goharbor/harbor/src/common/models"
    "github.com/goharbor/harbor/src/jobservice/config"
    "github.com/goharbor/harbor/src/jobservice/env"
    "github.com/goharbor/harbor/src/jobservice/job"
    "github.com/goharbor/harbor/src/jobservice/logger"
    "github.com/goharbor/harbor/src/jobservice/logger/sweeper"
    jmodel "github.com/goharbor/harbor/src/jobservice/models"
)

const (

@@ -42,24 +37,14 @@ const (
type Context struct {
    // System context
    sysContext context.Context

    // Logger for job
    logger logger.Interface

    // op command func
    opCommandFunc job.CheckOPCmdFunc

    // checkin func
    checkInFunc job.CheckInFunc

    // launch job
    launchJobFunc job.LaunchJobFunc

    // other required information
    properties map[string]interface{}

    // admin server client
    cfgMgr comcfg.CfgManager
    // job life cycle tracker
    tracker job.Tracker
}

// NewContext ...

@@ -107,11 +92,16 @@ func (c *Context) Init() error {

// Build implements the same method in env.JobContext interface
// This func will build the job execution context before running
func (c *Context) Build(dep env.JobData) (env.JobContext, error) {
func (c *Context) Build(tracker job.Tracker) (job.Context, error) {
    if tracker == nil || tracker.Job() == nil {
        return nil, errors.New("nil job tracker")
    }

    jContext := &Context{
        sysContext: c.sysContext,
        cfgMgr:     c.cfgMgr,
        properties: make(map[string]interface{}),
        tracker:    tracker,
    }

    // Copy properties
@@ -123,55 +113,21 @@ func (c *Context) Build(dep env.JobData) (env.JobContext, error) {

    // Refresh config properties
    err := c.cfgMgr.Load()
    props := c.cfgMgr.GetAll()
    if err != nil {
        return nil, err
    }

    props := c.cfgMgr.GetAll()
    for k, v := range props {
        jContext.properties[k] = v
    }

    // Set loggers for job
    if err := setLoggers(func(lg logger.Interface) {
        jContext.logger = lg
    }, dep.ID); err != nil {
    lg, err := createLoggers(tracker.Job().Info.JobID)
    if err != nil {
        return nil, err
    }

    if opCommandFunc, ok := dep.ExtraData["opCommandFunc"]; ok {
        if reflect.TypeOf(opCommandFunc).Kind() == reflect.Func {
            if funcRef, ok := opCommandFunc.(job.CheckOPCmdFunc); ok {
                jContext.opCommandFunc = funcRef
            }
        }
    }
    if jContext.opCommandFunc == nil {
        return nil, errors.New("failed to inject opCommandFunc")
    }

    if checkInFunc, ok := dep.ExtraData["checkInFunc"]; ok {
        if reflect.TypeOf(checkInFunc).Kind() == reflect.Func {
            if funcRef, ok := checkInFunc.(job.CheckInFunc); ok {
                jContext.checkInFunc = funcRef
            }
        }
    }

    if jContext.checkInFunc == nil {
        return nil, errors.New("failed to inject checkInFunc")
    }

    if launchJobFunc, ok := dep.ExtraData["launchJobFunc"]; ok {
        if reflect.TypeOf(launchJobFunc).Kind() == reflect.Func {
            if funcRef, ok := launchJobFunc.(job.LaunchJobFunc); ok {
                jContext.launchJobFunc = funcRef
            }
        }
    }

    if jContext.launchJobFunc == nil {
        return nil, errors.New("failed to inject launchJobFunc")
    }
    jContext.logger = lg

    return jContext, nil
}

@@ -189,22 +145,21 @@ func (c *Context) SystemContext() context.Context {

// Checkin is bridge func for reporting detailed status
func (c *Context) Checkin(status string) error {
    if c.checkInFunc != nil {
        c.checkInFunc(status)
    } else {
        return errors.New("nil check in function")
    }

    return nil
    return c.tracker.CheckIn(status)
}

// OPCommand return the control operational command like stop/cancel if have
func (c *Context) OPCommand() (string, bool) {
    if c.opCommandFunc != nil {
        return c.opCommandFunc()
func (c *Context) OPCommand() (job.OPCommand, bool) {
    latest, err := c.tracker.Status()
    if err != nil {
        return job.NilCommand, false
    }

    return "", false
    if job.StoppedStatus == latest {
        return job.StopCommand, true
    }

    return job.NilCommand, false
}

// GetLogger returns the logger

@ -212,54 +167,31 @@ func (c *Context) GetLogger() logger.Interface {
|
||||
return c.logger
|
||||
}
|
||||
|
||||
// LaunchJob launches sub jobs
|
||||
func (c *Context) LaunchJob(req jmodel.JobRequest) (jmodel.JobStats, error) {
|
||||
if c.launchJobFunc == nil {
|
||||
return jmodel.JobStats{}, errors.New("nil launch job function")
|
||||
}
|
||||
|
||||
return c.launchJobFunc(req)
|
||||
// Tracker returns the job tracker attached with the context
|
||||
func (c *Context) Tracker() job.Tracker {
|
||||
return c.tracker
|
||||
}
|
||||
|
||||
func getDBFromConfig(cfg map[string]interface{}) *models.Database {
|
||||
database := &models.Database{}
|
||||
database.Type = cfg[common.DatabaseType].(string)
|
||||
postgresql := &models.PostGreSQL{}
|
||||
postgresql.Host = cfg[common.PostGreSQLHOST].(string)
|
||||
postgresql.Port = int(cfg[common.PostGreSQLPort].(float64))
|
||||
postgresql.Username = cfg[common.PostGreSQLUsername].(string)
|
||||
postgresql.Password = cfg[common.PostGreSQLPassword].(string)
|
||||
postgresql.Database = cfg[common.PostGreSQLDatabase].(string)
|
||||
postgresql.SSLMode = cfg[common.PostGreSQLSSLMode].(string)
|
||||
database.PostGreSQL = postgresql
|
||||
|
||||
return database
|
||||
}
|
||||
|
||||
// create loggers based on the configurations and set it to the job executing context.
|
||||
func setLoggers(setter func(lg logger.Interface), jobID string) error {
|
||||
if setter == nil {
|
||||
return errors.New("missing setter func")
|
||||
}
|
||||
|
||||
// create loggers based on the configurations.
|
||||
func createLoggers(jobID string) (logger.Interface, error) {
|
||||
// Init job loggers here
|
||||
lOptions := []logger.Option{}
|
||||
lOptions := make([]logger.Option, 0)
|
||||
for _, lc := range config.DefaultConfig.JobLoggerConfigs {
|
||||
// For running job, the depth should be 5
|
||||
if lc.Name == logger.LoggerNameFile || lc.Name == logger.LoggerNameStdOutput || lc.Name == logger.LoggerNameDB {
|
||||
if lc.Name == logger.NameFile || lc.Name == logger.NameStdOutput || lc.Name == logger.NameDB {
|
||||
if lc.Settings == nil {
|
||||
lc.Settings = map[string]interface{}{}
|
||||
}
|
||||
lc.Settings["depth"] = 5
|
||||
}
|
||||
if lc.Name == logger.LoggerNameFile || lc.Name == logger.LoggerNameDB {
|
||||
if lc.Name == logger.NameFile || lc.Name == logger.NameDB {
|
||||
// Need extra param
|
||||
fSettings := map[string]interface{}{}
|
||||
for k, v := range lc.Settings {
|
||||
// Copy settings
|
||||
fSettings[k] = v
|
||||
}
|
||||
if lc.Name == logger.LoggerNameFile {
|
||||
if lc.Name == logger.NameFile {
|
||||
// Append file name param
|
||||
fSettings["filename"] = fmt.Sprintf("%s.log", jobID)
|
||||
lOptions = append(lOptions, logger.BackendOption(lc.Name, lc.Level, fSettings))
|
||||
@ -273,14 +205,7 @@ func setLoggers(setter func(lg logger.Interface), jobID string) error {
|
||||
}
|
||||
}
|
||||
// Get logger for the job
|
||||
lg, err := logger.GetLogger(lOptions...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("initialize job logger error: %s", err)
|
||||
}
|
||||
|
||||
setter(lg)
|
||||
|
||||
return nil
|
||||
return logger.GetLogger(lOptions...)
|
||||
}
|
||||
|
||||
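The options assembled above are handed to logger.GetLogger. A hedged sketch of what the FILE backend configuration amounts to, using only calls visible in this diff (the base_dir value and job ID are placeholders):

// Sketch: build a file-backed job logger the way createLoggers does.
settings := map[string]interface{}{
	"base_dir": "/tmp",                             // assumption: any writable directory
	"depth":    5,                                  // call depth used for running jobs
	"filename": fmt.Sprintf("%s.log", "my-job-id"), // per-job log file
}
lg, err := logger.GetLogger(logger.BackendOption("FILE", "INFO", settings))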
func initDBCompleted() error {
126
src/jobservice/job/impl/context_test.go
Normal file
@ -0,0 +1,126 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package impl

import (
"context"
"os"
"testing"

comcfg "github.com/goharbor/harbor/src/common/config"
"github.com/goharbor/harbor/src/jobservice/common/utils"
"github.com/goharbor/harbor/src/jobservice/config"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/jobservice/tests"
"github.com/gomodule/redigo/redis"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
)

// ContextImplTestSuite tests functions of context impl
type ContextImplTestSuite struct {
suite.Suite

tracker job.Tracker
namespace string
pool *redis.Pool
jobID string
}

// TestContextImplTestSuite is entry of go test
func TestContextImplTestSuite(t *testing.T) {
suite.Run(t, new(ContextImplTestSuite))
}

// SetupSuite prepares test suite
func (suite *ContextImplTestSuite) SetupSuite() {
config.DefaultConfig.JobLoggerConfigs = []*config.LoggerConfig{
{
Name: "STD_OUTPUT",
Level: "DEBUG",
},
{
Name: "FILE",
Level: "INFO",
Settings: map[string]interface{}{
"base_dir": os.TempDir(),
},
Sweeper: &config.LogSweeperConfig{
Duration: 1,
Settings: map[string]interface{}{
"work_dir": os.TempDir(),
},
},
},
}

suite.namespace = tests.GiveMeTestNamespace()
suite.pool = tests.GiveMeRedisPool()

suite.jobID = utils.MakeIdentifier()
mockStats := &job.Stats{
Info: &job.StatsInfo{
JobID: suite.jobID,
JobKind: job.KindGeneric,
JobName: job.SampleJob,
Status: job.PendingStatus.String(),
IsUnique: false,
},
}

suite.tracker = job.NewBasicTrackerWithStats(
context.Background(),
mockStats,
suite.namespace,
suite.pool,
nil,
)

err := suite.tracker.Save()
require.NoError(suite.T(), err, "job stats: nil error expected but got %s", err)
}

// TearDownSuite clears test suite
func (suite *ContextImplTestSuite) TearDownSuite() {
conn := suite.pool.Get()
defer func() {
_ = conn.Close()
}()

_ = tests.ClearAll(suite.namespace, conn)
}

// TestContextImpl tests the context impl
func (suite *ContextImplTestSuite) TestContextImpl() {
cfgMem := comcfg.NewInMemoryManager()
cfgMem.Set("read_only", "true")
ctx := NewContext(context.Background(), cfgMem)
jCtx, err := ctx.Build(suite.tracker)

require.NoErrorf(suite.T(), err, "build job context: nil error expected but got %s", err)
v, ok := jCtx.Get("read_only")
assert.Equal(suite.T(), true, ok)
assert.Equal(suite.T(), v, v.(bool))

err = jCtx.Checkin("check in testing")
assert.NoErrorf(suite.T(), err, "check in: nil error expected but got %s", err)

l := jCtx.GetLogger()
assert.NotNil(suite.T(), l, "logger should be not nil")

_, ok = jCtx.OPCommand()
assert.Equal(suite.T(), false, ok)
}
@ -17,144 +17,97 @@ package impl
import (
"context"
"errors"
"reflect"

"github.com/goharbor/harbor/src/jobservice/env"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/jobservice/logger"
jmodel "github.com/goharbor/harbor/src/jobservice/models"
)

// DefaultContext provides a basic job context
type DefaultContext struct {
// System context
sysContext context.Context

// Logger for job
logger logger.Interface

// op command func
opCommandFunc job.CheckOPCmdFunc

// checkin func
checkInFunc job.CheckInFunc

// launch job
launchJobFunc job.LaunchJobFunc

// other required information
// Other required information
properties map[string]interface{}
// Track the job attached with the context
tracker job.Tracker
}

// NewDefaultContext is constructor of building DefaultContext
func NewDefaultContext(sysCtx context.Context) env.JobContext {
func NewDefaultContext(sysCtx context.Context) job.Context {
return &DefaultContext{
sysContext: sysCtx,
properties: make(map[string]interface{}),
}
}

// Build implements the same method in env.JobContext interface
// Build implements the same method in env.Context interface
// This func will build the job execution context before running
func (c *DefaultContext) Build(dep env.JobData) (env.JobContext, error) {
func (dc *DefaultContext) Build(t job.Tracker) (job.Context, error) {
if t == nil {
return nil, errors.New("nil job tracker")
}

jContext := &DefaultContext{
sysContext: c.sysContext,
sysContext: dc.sysContext,
tracker: t,
properties: make(map[string]interface{}),
}

// Copy properties
if len(c.properties) > 0 {
for k, v := range c.properties {
if len(dc.properties) > 0 {
for k, v := range dc.properties {
jContext.properties[k] = v
}
}

// Set loggers for job
if err := setLoggers(func(lg logger.Interface) {
jContext.logger = lg
}, dep.ID); err != nil {
lg, err := createLoggers(t.Job().Info.JobID)
if err != nil {
return nil, err
}

if opCommandFunc, ok := dep.ExtraData["opCommandFunc"]; ok {
if reflect.TypeOf(opCommandFunc).Kind() == reflect.Func {
if funcRef, ok := opCommandFunc.(job.CheckOPCmdFunc); ok {
jContext.opCommandFunc = funcRef
}
}
}
if jContext.opCommandFunc == nil {
return nil, errors.New("failed to inject opCommandFunc")
}

if checkInFunc, ok := dep.ExtraData["checkInFunc"]; ok {
if reflect.TypeOf(checkInFunc).Kind() == reflect.Func {
if funcRef, ok := checkInFunc.(job.CheckInFunc); ok {
jContext.checkInFunc = funcRef
}
}
}

if jContext.checkInFunc == nil {
return nil, errors.New("failed to inject checkInFunc")
}

if launchJobFunc, ok := dep.ExtraData["launchJobFunc"]; ok {
if reflect.TypeOf(launchJobFunc).Kind() == reflect.Func {
if funcRef, ok := launchJobFunc.(job.LaunchJobFunc); ok {
jContext.launchJobFunc = funcRef
}
}
}

if jContext.launchJobFunc == nil {
return nil, errors.New("failed to inject launchJobFunc")
}
jContext.logger = lg

return jContext, nil
}

// Get implements the same method in env.JobContext interface
func (c *DefaultContext) Get(prop string) (interface{}, bool) {
v, ok := c.properties[prop]
// Get implements the same method in env.Context interface
func (dc *DefaultContext) Get(prop string) (interface{}, bool) {
v, ok := dc.properties[prop]
return v, ok
}

// SystemContext implements the same method in env.JobContext interface
func (c *DefaultContext) SystemContext() context.Context {
return c.sysContext
// SystemContext implements the same method in env.Context interface
func (dc *DefaultContext) SystemContext() context.Context {
return dc.sysContext
}

// Checkin is a bridge func for reporting detailed status
func (c *DefaultContext) Checkin(status string) error {
if c.checkInFunc != nil {
c.checkInFunc(status)
} else {
return errors.New("nil check in function")
}

return nil
func (dc *DefaultContext) Checkin(status string) error {
return dc.tracker.CheckIn(status)
}

// OPCommand returns the control operational command, like stop/cancel, if there is one
func (c *DefaultContext) OPCommand() (string, bool) {
if c.opCommandFunc != nil {
return c.opCommandFunc()
// OPCommand returns the control operational command, like stop, if there is one
func (dc *DefaultContext) OPCommand() (job.OPCommand, bool) {
latest, err := dc.tracker.Status()
if err != nil {
return job.NilCommand, false
}

return "", false
if job.StoppedStatus == latest {
return job.StopCommand, true
}

return job.NilCommand, false
}

// GetLogger returns the logger
func (c *DefaultContext) GetLogger() logger.Interface {
return c.logger
func (dc *DefaultContext) GetLogger() logger.Interface {
return dc.logger
}

// LaunchJob launches sub jobs
func (c *DefaultContext) LaunchJob(req jmodel.JobRequest) (jmodel.JobStats, error) {
if c.launchJobFunc == nil {
return jmodel.JobStats{}, errors.New("nil launch job function")
}

return c.launchJobFunc(req)
// Tracker returns the tracker tracking the job attached with the context
func (dc *DefaultContext) Tracker() job.Tracker {
return dc.tracker
}
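Taken together, the new Build signature implies a worker-side flow roughly like the following sketch (the surrounding worker code is not part of this hunk; the tracker variable is assumed to be a job.Tracker for the current job):

// Sketch: preparing a per-job context from a tracker.
prototype := NewDefaultContext(context.Background())
jCtx, err := prototype.Build(tracker)
if err != nil {
	// handle the build error
}
jCtx.GetLogger().Info("job context ready")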
@ -1,104 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package impl

import (
"context"
"fmt"
"os"
"testing"
"time"

"github.com/goharbor/harbor/src/jobservice/config"
"github.com/goharbor/harbor/src/jobservice/env"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/jobservice/models"
)

func TestDefaultContext(t *testing.T) {
defaultContext := NewDefaultContext(context.Background())
jobData := env.JobData{
ID: "fake_id",
Name: "DEMO",
Args: make(map[string]interface{}),
ExtraData: make(map[string]interface{}),
}
var opCmdFund job.CheckOPCmdFunc = func() (string, bool) {
return "stop", true
}
var checkInFunc job.CheckInFunc = func(msg string) {
fmt.Println(msg)
}
var launchJobFunc job.LaunchJobFunc = func(req models.JobRequest) (models.JobStats, error) {
return models.JobStats{
Stats: &models.JobStatData{
JobID: "fake_sub_job_id",
Status: "pending",
JobName: "DEMO",
JobKind: job.JobKindGeneric,
EnqueueTime: time.Now().Unix(),
UpdateTime: time.Now().Unix(),
},
}, nil
}

jobData.ExtraData["opCommandFunc"] = opCmdFund
jobData.ExtraData["checkInFunc"] = checkInFunc
jobData.ExtraData["launchJobFunc"] = launchJobFunc

oldLogConfig := config.DefaultConfig.JobLoggerConfigs
defer func() {
config.DefaultConfig.JobLoggerConfigs = oldLogConfig
}()

logSettings := map[string]interface{}{}
logSettings["base_dir"] = os.TempDir()
config.DefaultConfig.JobLoggerConfigs = []*config.LoggerConfig{
{
Level: "DEBUG",
Name: "FILE",
Settings: logSettings,
},
}

newJobContext, err := defaultContext.Build(jobData)
if err != nil {
t.Fatal(err)
}

cmd, ok := newJobContext.OPCommand()

if !ok || cmd != "stop" {
t.Fatalf("expect op command 'stop' but got %s", cmd)
}

if err := newJobContext.Checkin("hello"); err != nil {
t.Fatal(err)
}

stats, err := newJobContext.LaunchJob(models.JobRequest{})
if err != nil {
t.Fatal(err)
}

if stats.Stats.JobID != "fake_sub_job_id" {
t.Fatalf("expect job id 'fake_sub_job_id' but got %s", stats.Stats.JobID)
}

ctx := newJobContext.SystemContext()
if ctx == nil {
t.Fatal("got nil system context")
}

}
@ -1,127 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package impl

import (
"errors"
"fmt"
"strings"
"time"

"github.com/goharbor/harbor/src/jobservice/env"
"github.com/goharbor/harbor/src/jobservice/errs"
"github.com/goharbor/harbor/src/jobservice/opm"
)

// DemoJob is the job to demonstrate the job interface.
type DemoJob struct{}

// MaxFails is implementation of same method in Interface.
func (dj *DemoJob) MaxFails() uint {
return 3
}

// ShouldRetry ...
func (dj *DemoJob) ShouldRetry() bool {
return true
}

// Validate is implementation of same method in Interface.
func (dj *DemoJob) Validate(params map[string]interface{}) error {
if params == nil || len(params) == 0 {
return errors.New("parameters required for replication job")
}
name, ok := params["image"]
if !ok {
return errors.New("missing parameter 'image'")
}

if !strings.HasPrefix(name.(string), "demo") {
return fmt.Errorf("expected '%s' but got '%s'", "demo steven", name)
}

return nil
}

// Run the replication logic here.
func (dj *DemoJob) Run(ctx env.JobContext, params map[string]interface{}) error {
logger := ctx.GetLogger()

defer func() {
logger.Info("I'm finished, exit!")
}()

fmt.Println("I'm running")
logger.Infof("params: %#v\n", params)
logger.Infof("context: %#v\n", ctx)
if v, ok := ctx.Get("email_from"); ok {
fmt.Printf("Get prop from context: email_from=%s\n", v)
}
/*if u, err := dao.GetUser(models.User{}); err == nil {
fmt.Printf("u=%#+v\n", u)
}*/

logger.Info("check in 30%")
ctx.Checkin("30%")
time.Sleep(2 * time.Second)
logger.Warning("check in 60%")
ctx.Checkin("60%")
time.Sleep(2 * time.Second)
logger.Debug("check in 100%")
ctx.Checkin("100%")
time.Sleep(1 * time.Second)

// HOLD ON FOR A WHILE
logger.Error("Holding for 5 sec")
<-time.After(5 * time.Second)

if cmd, ok := ctx.OPCommand(); ok {
logger.Infof("cmd=%s\n", cmd)
fmt.Printf("Receive OP command: %s\n", cmd)
if cmd == opm.CtlCommandCancel {
logger.Info("exit for receiving cancel signal")
return errs.JobCancelledError()
}

logger.Info("exit for receiving stop signal")
return errs.JobStoppedError()
}

/*fmt.Println("Launch sub job")
jobParams := make(map[string]interface{})
jobParams["image"] = "demo:1.7"
subDemoJob := models.JobRequest{
Job: &models.JobData{
Name: "DEMO",
Parameters: jobParams,
Metadata: &models.JobMetadata{
JobKind: job.JobKindGeneric,
},
},
}

subJob, err := ctx.LaunchJob(subDemoJob)
if err != nil {
fmt.Printf("Create sub job failed with error: %s\n", err)
logger.Error(err)
return
}

fmt.Printf("Sub job: %v", subJob)*/

fmt.Println("I'm close to end")

return nil
}
@ -23,7 +23,7 @@ import (
"github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/common/config"
"github.com/goharbor/harbor/src/common/registryctl"
"github.com/goharbor/harbor/src/jobservice/env"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/jobservice/logger"
"github.com/goharbor/harbor/src/registryctl/client"
)
@ -56,12 +56,12 @@ func (gc *GarbageCollector) ShouldRetry() bool {
}

// Validate implements the interface in job/Interface
func (gc *GarbageCollector) Validate(params map[string]interface{}) error {
func (gc *GarbageCollector) Validate(params job.Parameters) error {
return nil
}

// Run implements the interface in job/Interface
func (gc *GarbageCollector) Run(ctx env.JobContext, params map[string]interface{}) error {
func (gc *GarbageCollector) Run(ctx job.Context, params job.Parameters) error {
if err := gc.init(ctx, params); err != nil {
return err
}
@ -93,12 +93,12 @@ func (gc *GarbageCollector) Run(ctx env.JobContext, params map[string]interface{
return nil
}

func (gc *GarbageCollector) init(ctx env.JobContext, params map[string]interface{}) error {
func (gc *GarbageCollector) init(ctx job.Context, params job.Parameters) error {
registryctl.Init()
gc.registryCtlClient = registryctl.RegistryCtlClient
gc.logger = ctx.GetLogger()

errTpl := "Failed to get required property: %s"
errTpl := "failed to get required property: %s"
if v, ok := ctx.Get(common.CoreURL); ok && len(v.(string)) > 0 {
gc.CoreURL = v.(string)
} else {
@ -165,7 +165,7 @@ func (gc *GarbageCollector) cleanCache() error {

func delKeys(con redis.Conn, pattern string) error {
iter := 0
keys := []string{}
keys := make([]string, 0)
for {
arr, err := redis.Values(con.Do("SCAN", iter, "MATCH", pattern))
if err != nil {
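The hunk above is cut off inside delKeys; for context, a redigo cursor-based scan-and-delete typically completes along these lines (a sketch consistent with the call shown, not the verbatim remainder of the function):

// Sketch: finish the SCAN loop, then delete the collected keys.
for {
	arr, err := redis.Values(con.Do("SCAN", iter, "MATCH", pattern))
	if err != nil {
		return err
	}
	// The first reply element is the next cursor, the second the key batch
	iter, _ = redis.Int(arr[0], nil)
	batch, _ := redis.Strings(arr[1], nil)
	keys = append(keys, batch...)
	if iter == 0 { // cursor 0 means the scan is complete
		break
	}
}
for _, key := range keys {
	if _, err := con.Do("DEL", key); err != nil {
		return err
	}
}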
@ -18,8 +18,7 @@ import (
"encoding/json"
"fmt"

"github.com/goharbor/harbor/src/jobservice/env"
"github.com/goharbor/harbor/src/jobservice/opm"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/replication/model"
"github.com/goharbor/harbor/src/replication/transfer"

@ -51,13 +50,13 @@ func (r *Replication) ShouldRetry() bool {
}

// Validate does nothing
func (r *Replication) Validate(params map[string]interface{}) error {
func (r *Replication) Validate(params job.Parameters) error {
return nil
}

// Run gets the corresponding transfer according to the resource type
// and calls its function to do the real work
func (r *Replication) Run(ctx env.JobContext, params map[string]interface{}) error {
func (r *Replication) Run(ctx job.Context, params job.Parameters) error {
logger := ctx.GetLogger()

src, dst, err := parseParams(params)
@ -77,15 +76,15 @@ func (r *Replication) Run(ctx env.JobContext, params map[string]interface{}) err
if !exist {
return false
}
return cmd == opm.CtlCommandStop
return cmd == job.StopCommand
}
transfer, err := factory(ctx.GetLogger(), stopFunc)
trans, err := factory(ctx.GetLogger(), stopFunc)
if err != nil {
logger.Errorf("failed to create transfer: %v", err)
return err
}

return transfer.Transfer(src, dst)
return trans.Transfer(src, dst)
}

func parseParams(params map[string]interface{}) (*model.Resource, *model.Resource, error) {
@ -22,9 +22,7 @@ import (
common_http "github.com/goharbor/harbor/src/common/http"
"github.com/goharbor/harbor/src/common/http/modifier/auth"
reg "github.com/goharbor/harbor/src/common/utils/registry"
"github.com/goharbor/harbor/src/jobservice/env"
"github.com/goharbor/harbor/src/jobservice/errs"
"github.com/goharbor/harbor/src/jobservice/opm"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/replication/model"
)

@ -32,7 +30,7 @@ import (
// a scheduler when submitting it as a scheduled job. It receives
// a URL and data, and post the data to the URL when it is running
type Scheduler struct {
ctx env.JobContext
ctx job.Context
}

// ShouldRetry ...
@ -46,15 +44,15 @@ func (s *Scheduler) MaxFails() uint {
}

// Validate ....
func (s *Scheduler) Validate(params map[string]interface{}) error {
func (s *Scheduler) Validate(params job.Parameters) error {
return nil
}

// Run ...
func (s *Scheduler) Run(ctx env.JobContext, params map[string]interface{}) error {
func (s *Scheduler) Run(ctx job.Context, params job.Parameters) error {
cmd, exist := ctx.OPCommand()
if exist && cmd == opm.CtlCommandStop {
return errs.JobStoppedError()
if exist && cmd == job.StopCommand {
return nil
}
logger := ctx.GetLogger()
87
src/jobservice/job/impl/sample/job.go
Normal file
@ -0,0 +1,87 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package sample

import (
"errors"
"fmt"
"strings"
"time"

"github.com/goharbor/harbor/src/jobservice/job"
)

// Job is a sample to show how to implement a job.
type Job struct{}

// MaxFails is implementation of same method in Interface.
func (j *Job) MaxFails() uint {
return 3
}

// ShouldRetry ...
func (j *Job) ShouldRetry() bool {
return true
}

// Validate is implementation of same method in Interface.
func (j *Job) Validate(params job.Parameters) error {
if params == nil || len(params) == 0 {
return errors.New("parameters required for replication job")
}
name, ok := params["image"]
if !ok {
return errors.New("missing parameter 'image'")
}

if !strings.HasPrefix(name.(string), "demo") {
return fmt.Errorf("expected '%s' but got '%s'", "demo *", name)
}

return nil
}

// Run the sample job logic here.
func (j *Job) Run(ctx job.Context, params job.Parameters) error {
logger := ctx.GetLogger()

logger.Info("Sample job starting")
defer func() {
logger.Info("Sample job exit")
}()

logger.Infof("Params: %#v\n", params)
if v, ok := ctx.Get("sample"); ok {
fmt.Printf("Get prop from context: sample=%s\n", v)
}

ctx.Checkin("progress data: %30")
<-time.After(1 * time.Second)
ctx.Checkin("progress data: %60")

// HOLD ON FOR A WHILE
logger.Warning("Holding for 10 seconds")
<-time.After(10 * time.Second)

if cmd, ok := ctx.OPCommand(); ok {
if cmd == job.StopCommand {
logger.Info("Exit for receiving stop signal")
return nil
}
}

// Successfully exit
return nil
}
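With the request models added later in this diff (src/jobservice/job/models.go), submitting this sample job would look roughly like the sketch below; the actual submit call is outside this commit's hunks and is only assumed:

// Sketch: a launch request for the sample job.
req := &job.Request{
	Job: &job.RequestBody{
		Name: job.SampleJob, // "DEMO"
		Parameters: job.Parameters{
			"image": "demo:latest", // Validate requires an "image" param with the "demo" prefix
		},
		Metadata: &job.Metadata{
			JobKind:  job.KindGeneric,
			IsUnique: false,
		},
	},
}
// launcher.Launch(req) // hypothetical: hand the request to the job launcher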
@ -25,7 +25,7 @@ import (

"github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/jobservice/env"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/jobservice/job/impl/utils"
)

@ -50,7 +50,7 @@ func (sa *All) ShouldRetry() bool {
}

// Validate implements the interface in job/Interface
func (sa *All) Validate(params map[string]interface{}) error {
func (sa *All) Validate(params job.Parameters) error {
if len(params) > 0 {
return fmt.Errorf("the parms should be empty for scan all job")
}
@ -58,7 +58,7 @@ func (sa *All) Validate(params map[string]interface{}) error {
}

// Run implements the interface in job/Interface
func (sa *All) Run(ctx env.JobContext, params map[string]interface{}) error {
func (sa *All) Run(ctx job.Context, params job.Parameters) error {
logger := ctx.GetLogger()
logger.Info("Scanning all the images in the registry")
err := sa.init(ctx)
@ -107,7 +107,7 @@ func (sa *All) Run(ctx env.JobContext, params map[string]interface{}) error {
return nil
}

func (sa *All) init(ctx env.JobContext) error {
func (sa *All) init(ctx job.Context) error {
if v, err := getAttrFromCtx(ctx, common.RegistryURL); err == nil {
sa.registryURL = v
} else {
@ -133,9 +133,9 @@ func (sa *All) init(ctx env.JobContext) error {
return nil
}

func getAttrFromCtx(ctx env.JobContext, key string) (string, error) {
func getAttrFromCtx(ctx job.Context, key string) (string, error) {
if v, ok := ctx.Get(key); ok && len(v.(string)) > 0 {
return v.(string), nil
}
return "", fmt.Errorf("Failed to get required property: %s", key)
return "", fmt.Errorf("failed to get required property: %s", key)
}

@ -24,11 +24,11 @@ import (
"github.com/docker/distribution/manifest/schema2"
"github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/job"
cjob "github.com/goharbor/harbor/src/common/job"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils/clair"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/jobservice/env"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/jobservice/job/impl/utils"
)

@ -51,12 +51,12 @@ func (cj *ClairJob) ShouldRetry() bool {
}

// Validate implements the interface in job/Interface
func (cj *ClairJob) Validate(params map[string]interface{}) error {
func (cj *ClairJob) Validate(params job.Parameters) error {
return nil
}

// Run implements the interface in job/Interface
func (cj *ClairJob) Run(ctx env.JobContext, params map[string]interface{}) error {
func (cj *ClairJob) Run(ctx job.Context, params job.Parameters) error {
logger := ctx.GetLogger()
if err := cj.init(ctx); err != nil {
logger.Errorf("Failed to initialize the job, error: %v", err)
@ -114,8 +114,8 @@ func (cj *ClairJob) Run(ctx env.JobContext, params map[string]interface{}) error
return err
}

func (cj *ClairJob) init(ctx env.JobContext) error {
errTpl := "Failed to get required property: %s"
func (cj *ClairJob) init(ctx job.Context) error {
errTpl := "failed to get required property: %s"
if v, ok := ctx.Get(common.RegistryURL); ok && len(v.(string)) > 0 {
cj.registryURL = v.(string)
} else {
@ -140,8 +140,8 @@ func (cj *ClairJob) init(ctx env.JobContext) error {
return nil
}

func transformParam(params map[string]interface{}) (*job.ScanJobParms, error) {
res := job.ScanJobParms{}
func transformParam(params job.Parameters) (*cjob.ScanJobParms, error) {
res := cjob.ScanJobParms{}
parmsBytes, err := json.Marshal(params)
if err != nil {
return nil, err
@ -151,7 +151,7 @@ func transformParam(params map[string]interface{}) (*job.ScanJobParms, error) {
}

func prepareLayers(payload []byte, registryURL, repo, tk string) ([]models.ClairLayer, error) {
layers := []models.ClairLayer{}
layers := make([]models.ClairLayer, 0)
manifest, _, err := distribution.UnmarshalManifest(schema2.MediaTypeManifest, payload)
if err != nil {
return layers, err
@ -14,22 +14,6 @@

package job

import (
"github.com/goharbor/harbor/src/jobservice/env"
"github.com/goharbor/harbor/src/jobservice/models"
)

// CheckOPCmdFunc is the function to check if the related operation commands
// like STOP or CANCEL is fired for the specified job. If yes, return the
// command code for job to determine if take corresponding action.
type CheckOPCmdFunc func() (string, bool)

// CheckInFunc is designed for job to report more detailed progress info
type CheckInFunc func(message string)

// LaunchJobFunc is designed to launch sub jobs in the job
type LaunchJobFunc func(req models.JobRequest) (models.JobStats, error)

// Interface defines the related injection and run entry methods.
type Interface interface {
// Declare how many times the job can be retried if failed.
@ -38,7 +22,7 @@ type Interface interface {
// uint: the failure count allowed. If it is set to 0, then default value 4 is used.
MaxFails() uint

// Tell the worker pool if retry the failed job when the fails is
// Tell the worker if retry the failed job when the fails is
// still less than the number declared by the method 'MaxFails'.
//
// Returns:
@ -49,16 +33,16 @@ type Interface interface {
//
// Return:
// error if parameters are not valid. NOTES: If no parameters needed, directly return nil.
Validate(params map[string]interface{}) error
Validate(params Parameters) error

// Run the business logic here.
// The related arguments will be injected by the workerpool.
//
// ctx env.JobContext : Job execution context.
// ctx Context : Job execution context.
// params map[string]interface{} : parameters with key-pair style for the job execution.
//
// Returns:
// error if failed to run. NOTES: If job is stopped or cancelled, a specified error should be returned
//
Run(ctx env.JobContext, params map[string]interface{}) error
Run(ctx Context, params Parameters) error
}
@ -1,32 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package job

const (
// JobStatusPending : job status pending
JobStatusPending = "Pending"
// JobStatusRunning : job status running
JobStatusRunning = "Running"
// JobStatusStopped : job status stopped
JobStatusStopped = "Stopped"
// JobStatusCancelled : job status cancelled
JobStatusCancelled = "Cancelled"
// JobStatusError : job status error
JobStatusError = "Error"
// JobStatusSuccess : job status success
JobStatusSuccess = "Success"
// JobStatusScheduled : job status scheduled
JobStatusScheduled = "Scheduled"
)
@ -15,10 +15,10 @@
package job

const (
// JobKindGeneric : Kind of generic job
JobKindGeneric = "Generic"
// JobKindScheduled : Kind of scheduled job
JobKindScheduled = "Scheduled"
// JobKindPeriodic : Kind of periodic job
JobKindPeriodic = "Periodic"
// KindGeneric : Kind of generic job
KindGeneric = "Generic"
// KindScheduled : Kind of scheduled job
KindScheduled = "Scheduled"
// KindPeriodic : Kind of periodic job
KindPeriodic = "Periodic"
)
33
src/jobservice/job/known_jobs.go
Normal file
@ -0,0 +1,33 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package job

// Define the register name constants of known jobs

const (
// SampleJob is name of demo job
SampleJob = "DEMO"

// ImageScanJob is the name of the scan job; it will be used as the key to register to job service.
ImageScanJob = "IMAGE_SCAN"
// ImageScanAllJob is the name of "scanall" job in job service
ImageScanAllJob = "IMAGE_SCAN_ALL"
// ImageGC the name of image garbage collection job in job service
ImageGC = "IMAGE_GC"
// Replication : the name of the replication job in job service
Replication = "REPLICATION"
// ReplicationScheduler : the name of the replication scheduler job in job service
ReplicationScheduler = "IMAGE_REPLICATE"
)
133
src/jobservice/job/models.go
Normal file
@ -0,0 +1,133 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package job

import (
"github.com/goharbor/harbor/src/jobservice/common/utils"
"github.com/pkg/errors"
)

// Parameters for job execution.
type Parameters map[string]interface{}

// Request is the request of launching a job.
type Request struct {
Job *RequestBody `json:"job"`
}

// RequestBody keeps the basic info.
type RequestBody struct {
Name string `json:"name"`
Parameters Parameters `json:"parameters"`
Metadata *Metadata `json:"metadata"`
StatusHook string `json:"status_hook"`
}

// Metadata stores the metadata of job.
type Metadata struct {
JobKind string `json:"kind"`
ScheduleDelay uint64 `json:"schedule_delay,omitempty"`
Cron string `json:"cron_spec,omitempty"`
IsUnique bool `json:"unique"`
}

// Stats keeps the result of job launching.
type Stats struct {
Info *StatsInfo `json:"job"`
}

// StatsInfo keeps the stats of job
type StatsInfo struct {
JobID string `json:"id"`
Status string `json:"status"`
JobName string `json:"name"`
JobKind string `json:"kind"`
IsUnique bool `json:"unique"`
RefLink string `json:"ref_link,omitempty"`
CronSpec string `json:"cron_spec,omitempty"`
EnqueueTime int64 `json:"enqueue_time"`
UpdateTime int64 `json:"update_time"`
RunAt int64 `json:"run_at,omitempty"`
CheckIn string `json:"check_in,omitempty"`
CheckInAt int64 `json:"check_in_at,omitempty"`
DieAt int64 `json:"die_at,omitempty"`
WebHookURL string `json:"web_hook_url,omitempty"`
UpstreamJobID string `json:"upstream_job_id,omitempty"` // Ref the upstream job if existing
NumericPID int64 `json:"numeric_policy_id,omitempty"` // The numeric policy ID of the periodic job
Parameters Parameters `json:"parameters,omitempty"`
}

// ActionRequest defines for triggering job action like stop/cancel.
type ActionRequest struct {
Action string `json:"action"`
}

// StatusChange is designed for reporting the status change via hook.
type StatusChange struct {
JobID string `json:"job_id"`
Status string `json:"status"`
CheckIn string `json:"check_in,omitempty"`
Metadata *StatsInfo `json:"metadata,omitempty"`
}

// SimpleStatusChange only keeps job ID and the target status
type SimpleStatusChange struct {
JobID string `json:"job_id"`
TargetStatus string `json:"target_status"`
}

// Validate the job stats
func (st *Stats) Validate() error {
if st.Info == nil {
return errors.New("nil stats body")
}

if utils.IsEmptyStr(st.Info.JobID) {
return errors.New("missing job ID in job stats")
}

if utils.IsEmptyStr(st.Info.JobName) {
return errors.New("missing job name in job stats")
}

if utils.IsEmptyStr(st.Info.JobKind) {
return errors.New("missing job kind in job stats")
}

if st.Info.JobKind != KindGeneric &&
st.Info.JobKind != KindPeriodic &&
st.Info.JobKind != KindScheduled {
return errors.Errorf("job kind is not supported: %s", st.Info.JobKind)
}

status := Status(st.Info.Status)
if err := status.Validate(); err != nil {
return err
}

if st.Info.JobKind == KindPeriodic {
if utils.IsEmptyStr(st.Info.CronSpec) {
return errors.New("missing cron spec for periodic job")
}
}

if st.Info.JobKind == KindScheduled {
if st.Info.RunAt == 0 {
return errors.New("enqueue timestamp missing for scheduled job")
}
}

return nil
}
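As a quick illustration of what Validate accepts, the smallest stats payload that passes for a generic job looks like this (values are placeholders):

// Sketch: a minimal valid generic-job stats object.
stats := &job.Stats{
	Info: &job.StatsInfo{
		JobID:   "fake_job_id", // placeholder values
		JobName: job.SampleJob,
		JobKind: job.KindGeneric,
		Status:  job.PendingStatus.String(),
	},
}
err := stats.Validate() // nil: generic jobs need only ID, name, kind and a valid status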
@ -12,4 +12,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.

package pool
package job

const (
// StopCommand is const for stop command
StopCommand OPCommand = "stop"
// NilCommand is const for a nil command
NilCommand OPCommand = "nil"
)

// OPCommand is the type of job operation commands
type OPCommand string

// IsStop returns true if the op command is stop
func (oc OPCommand) IsStop() bool {
return oc == "stop"
}
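A short sketch of how a handler might map a raw action string onto these commands (the handler itself is an assumption; the constants and IsStop are from this file):

// Sketch: interpreting an incoming action request.
func toOPCommand(action string) (job.OPCommand, bool) {
	cmd := job.OPCommand(action)
	if cmd.IsStop() { // same as cmd == job.StopCommand
		return job.StopCommand, true
	}
	return job.NilCommand, false
}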
82
src/jobservice/job/status.go
Normal file
@ -0,0 +1,82 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package job

import "fmt"

const (
// PendingStatus : job status pending
PendingStatus Status = "Pending"
// RunningStatus : job status running
RunningStatus Status = "Running"
// StoppedStatus : job status stopped
StoppedStatus Status = "Stopped"
// ErrorStatus : job status error
ErrorStatus Status = "Error"
// SuccessStatus : job status success
SuccessStatus Status = "Success"
// ScheduledStatus : job status scheduled
ScheduledStatus Status = "Scheduled"
)

// Status of job
type Status string

// Validate the status
// If it's valid, then return nil error
// otherwise a non-nil error is returned
func (s Status) Validate() error {
if s.Code() == -1 {
return fmt.Errorf("%s is not valid job status", s)
}

return nil
}

// Code of job status
func (s Status) Code() int {
switch s {
case "Pending":
return 0
case "Scheduled":
return 1
case "Running":
return 2
// All final status share the same code
// Each job will have only 1 final status
case "Stopped":
return 3
case "Error":
return 3
case "Success":
return 3
default:
}

return -1
}

// Compare the two job status
// if < 0, s before another status
// if == 0, same status
// if > 0, s after another status
func (s Status) Compare(another Status) int {
return s.Code() - another.Code()
}

// String returns the raw string value of the status
func (s Status) String() string {
return string(s)
}
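A worked example of the code mapping above, which is why Compare gives a lifecycle ordering: Pending(0) < Scheduled(1) < Running(2) < any final status(3).

// Sketch: Compare orders statuses by their lifecycle position.
fmt.Println(job.PendingStatus.Compare(job.RunningStatus)) // -2: Pending comes before Running
fmt.Println(job.StoppedStatus.Compare(job.SuccessStatus)) // 0: both are final statuses
fmt.Println(job.Status("Bogus").Validate())               // error: not a valid job status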
748
src/jobservice/job/tracker.go
Normal file
@ -0,0 +1,748 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package job

import (
"context"
"encoding/json"
"math/rand"
"strconv"
"time"

"github.com/goharbor/harbor/src/jobservice/common/query"
"github.com/goharbor/harbor/src/jobservice/common/rds"
"github.com/goharbor/harbor/src/jobservice/common/utils"
"github.com/goharbor/harbor/src/jobservice/errs"
"github.com/goharbor/harbor/src/jobservice/logger"
"github.com/gomodule/redigo/redis"
"github.com/pkg/errors"
)

const (
// Try best to keep the job stats data, but clear it anyway after a long time
statDataExpireTime = 180 * 24 * 3600
)

// Tracker is designed to track the life cycle of the job described by the stats
// The status changes are linear: each status has a strict predecessor and successor
// Check should be enforced before switching
//
// Pending is the default status when creating a job, so there is no need to switch to it
type Tracker interface {
// Save the job stats which is tracked by this tracker to the backend
//
// Return:
// non-nil error returned if any issues happened
Save() error

// Load the job stats which is tracked by this tracker with the backend data
//
// Return:
// non-nil error returned if any issues happened
Load() error

// Get the job stats which is tracked by this tracker
//
// Returns:
// *Stats : job stats data
Job() *Stats

// Update the properties of the job stats
//
// fieldAndValues ...interface{} : One or more properties being updated
//
// Returns:
// error if update failed
Update(fieldAndValues ...interface{}) error

// Executions returns the executions of the job tracked by this tracker.
// Please pay attention, this is only for periodic jobs.
//
// Returns:
// job execution IDs matched the query
// the total number
// error if any issues happened
Executions(q *query.Parameter) ([]string, int64, error)

// NumericID returns the numeric ID of periodic job.
// Please pay attention, this is only for periodic jobs.
NumericID() (int64, error)

// Mark the periodic job execution to done by updating the score
// of the relation between its periodic policy and execution to -1.
PeriodicExecutionDone() error

// Check in message
CheckIn(message string) error

// Update status with retry enabled
UpdateStatusWithRetry(targetStatus Status) error

// The current status of job
Status() (Status, error)

// Expire the job stats data
Expire() error

// Switch status to running
Run() error

// Switch status to stopped
Stop() error

// Switch the status to error
Fail() error

// Switch the status to success
Succeed() error
}
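Before the implementation, a sketch of the typical tracker lifecycle around one job run (error handling trimmed; the stats value, namespace and redis pool are assumed to exist, and the hook callback may be nil):

// Sketch: driving a job's status through the tracker.
t := job.NewBasicTrackerWithStats(context.Background(), stats, namespace, redisPool, nil)
_ = t.Save()    // persist the pending stats
_ = t.Run()     // Pending -> Running
// ... execute the job body, optionally t.CheckIn("50%") along the way ...
_ = t.Succeed() // Running -> Success (or t.Fail() / t.Stop())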
// basicTracker implements Tracker interface based on redis
type basicTracker struct {
namespace string
context context.Context
pool *redis.Pool
jobID string
jobStats *Stats
callback HookCallback
}

// NewBasicTrackerWithID builds a tracker with the provided job ID
func NewBasicTrackerWithID(
ctx context.Context,
jobID string,
ns string,
pool *redis.Pool,
callback HookCallback,
) Tracker {
return &basicTracker{
namespace: ns,
context: ctx,
pool: pool,
jobID: jobID,
callback: callback,
}
}

// NewBasicTrackerWithStats builds a tracker with the provided job stats
func NewBasicTrackerWithStats(
ctx context.Context,
stats *Stats,
ns string,
pool *redis.Pool,
callback HookCallback,
) Tracker {
return &basicTracker{
namespace: ns,
context: ctx,
pool: pool,
jobStats: stats,
jobID: stats.Info.JobID,
callback: callback,
}
}

// Load refreshes the job stats which is tracked by this tracker
func (bt *basicTracker) Load() error {
return bt.retrieve()
}

// Job returns the job stats which is tracked by this tracker
func (bt *basicTracker) Job() *Stats {
return bt.jobStats
}

// Update the properties of the job stats
func (bt *basicTracker) Update(fieldAndValues ...interface{}) error {
if len(fieldAndValues) == 0 {
return errors.New("no properties specified to update")
}

conn := bt.pool.Get()
defer func() {
_ = conn.Close()
}()

key := rds.KeyJobStats(bt.namespace, bt.jobID)
args := []interface{}{"update_time", time.Now().Unix()} // update timestamp
args = append(args, fieldAndValues...)

return rds.HmSet(conn, key, args...)
}

// Status returns the current status of job tracked by this tracker
func (bt *basicTracker) Status() (Status, error) {
// Retrieve the latest status again in case we got an outdated one.
conn := bt.pool.Get()
defer func() {
_ = conn.Close()
}()

rootKey := rds.KeyJobStats(bt.namespace, bt.jobID)
return getStatus(conn, rootKey)
}

// NumericID returns the numeric ID of the periodic job
func (bt *basicTracker) NumericID() (int64, error) {
if bt.jobStats.Info.NumericPID > 0 {
return bt.jobStats.Info.NumericPID, nil
}

return -1, errors.Errorf("numeric ID not found for job: %s", bt.jobID)
}

// PeriodicExecutionDone marks the execution done
func (bt *basicTracker) PeriodicExecutionDone() error {
if utils.IsEmptyStr(bt.jobStats.Info.UpstreamJobID) {
return errors.Errorf("%s is not periodic job execution", bt.jobID)
}

key := rds.KeyUpstreamJobAndExecutions(bt.namespace, bt.jobStats.Info.UpstreamJobID)

conn := bt.pool.Get()
defer func() {
_ = conn.Close()
}()

args := []interface{}{key, "XX", -1, bt.jobID}
_, err := conn.Do("ZADD", args...)

return err
}

// Check in message
func (bt *basicTracker) CheckIn(message string) error {
if utils.IsEmptyStr(message) {
return errors.New("check in error: empty message")
}

now := time.Now().Unix()
current := Status(bt.jobStats.Info.Status)

bt.refresh(current, message)
// Fire the hook event first; surface its error instead of silently overwriting it
if err := bt.fireHookEvent(current, message); err != nil {
return err
}

return bt.Update(
"check_in", message,
"check_in_at", now,
"update_time", now,
)
}

// Executions of the tracked job
func (bt *basicTracker) Executions(q *query.Parameter) ([]string, int64, error) {
if bt.jobStats.Info.JobKind != KindPeriodic {
return nil, 0, errors.New("only periodic job has executions")
}

conn := bt.pool.Get()
defer func() {
_ = conn.Close()
}()

key := rds.KeyUpstreamJobAndExecutions(bt.namespace, bt.jobID)

// Query executions by "non stopped"
if nonStoppedOnly, ok := q.Extras.Get(query.ExtraParamKeyNonStoppedOnly); ok {
if v, yes := nonStoppedOnly.(bool); yes && v {
return queryExecutions(conn, key, q)
}
}

// Pagination
var pageNumber, pageSize uint = 1, query.DefaultPageSize
if q != nil {
if q.PageNumber > 0 {
pageNumber = q.PageNumber
}
if q.PageSize > 0 {
pageSize = q.PageSize
}
}

// Get total first
total, err := redis.Int64(conn.Do("ZCARD", key))
if err != nil {
return nil, 0, err
}

// No items
result := make([]string, 0)
if total == 0 || (int64)((pageNumber-1)*pageSize) >= total {
return result, total, nil
}

min, max := (pageNumber-1)*pageSize, pageNumber*pageSize-1
args := []interface{}{key, min, max}
list, err := redis.Values(conn.Do("ZREVRANGE", args...))
if err != nil {
return nil, 0, err
}

for _, item := range list {
if eID, ok := item.([]byte); ok {
result = append(result, string(eID))
}
}

return result, total, nil
}
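A quick worked example of the pagination window above: for pageNumber=2 and pageSize=20, ZREVRANGE is asked for indexes 20 through 39, i.e. the second page of execution IDs ordered from newest to oldest.

// Worked example of the ZREVRANGE window computed above:
// pageNumber=2, pageSize=20 -> min = (2-1)*20 = 20, max = 2*20-1 = 39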
// Expire job stats
|
||||
func (bt *basicTracker) Expire() error {
|
||||
conn := bt.pool.Get()
|
||||
defer func() {
|
||||
_ = conn.Close()
|
||||
}()
|
||||
|
||||
key := rds.KeyJobStats(bt.namespace, bt.jobID)
|
||||
num, err := conn.Do("EXPIRE", key, statDataExpireTime)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if num == 0 {
|
||||
return errors.Errorf("job stats for expiring %s does not exist", bt.jobID)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Run job
|
||||
// Either one is failed, the final return will be marked as failed.
|
||||
func (bt *basicTracker) Run() error {
|
||||
err := bt.compareAndSet(RunningStatus)
|
||||
if !errs.IsStatusMismatchError(err) {
|
||||
bt.refresh(RunningStatus)
|
||||
if er := bt.fireHookEvent(RunningStatus); err == nil && er != nil {
|
||||
return er
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}

// Stop job
// Stop is a final status; if it fails to apply, a retry should be enforced.
// If either the status update or the hook event fails, the final return is marked as failed.
func (bt *basicTracker) Stop() error {
	err := bt.UpdateStatusWithRetry(StoppedStatus)
	if !errs.IsStatusMismatchError(err) {
		bt.refresh(StoppedStatus)
		if er := bt.fireHookEvent(StoppedStatus); err == nil && er != nil {
			return er
		}
	}

	return err
}

// Fail job
// Fail is a final status; if it fails to apply, a retry should be enforced.
// If either the status update or the hook event fails, the final return is marked as failed.
func (bt *basicTracker) Fail() error {
	err := bt.UpdateStatusWithRetry(ErrorStatus)
	if !errs.IsStatusMismatchError(err) {
		bt.refresh(ErrorStatus)
		if er := bt.fireHookEvent(ErrorStatus); err == nil && er != nil {
			return er
		}
	}

	return err
}

// Succeed job
// Succeed is a final status; if it fails to apply, a retry should be enforced.
// If either the status update or the hook event fails, the final return is marked as failed.
func (bt *basicTracker) Succeed() error {
	err := bt.UpdateStatusWithRetry(SuccessStatus)
	if !errs.IsStatusMismatchError(err) {
		bt.refresh(SuccessStatus)
		if er := bt.fireHookEvent(SuccessStatus); err == nil && er != nil {
			return er
		}
	}

	return err
}
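
// A sketch of how a worker might drive these final-status transitions,
// assuming a Tracker exposing Run/Fail/Succeed as above; runJob stands in
// for a hypothetical job body and is not part of this change.
func exampleLifecycle(t Tracker, runJob func() error) error {
	if err := t.Run(); err != nil {
		return err
	}
	if err := runJob(); err != nil {
		// Fail is final; UpdateStatusWithRetry enqueues a retry if it cannot be applied now
		return t.Fail()
	}
	return t.Succeed()
}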

// Save the stats of job tracked by this tracker
func (bt *basicTracker) Save() (err error) {
	if bt.jobStats == nil {
		return errors.New("nil job stats to save")
	}

	conn := bt.pool.Get()
	defer func() {
		_ = conn.Close()
	}()

	// Alias
	stats := bt.jobStats

	key := rds.KeyJobStats(bt.namespace, stats.Info.JobID)
	args := make([]interface{}, 0)
	args = append(args, key)
	args = append(args,
		"id", stats.Info.JobID,
		"name", stats.Info.JobName,
		"kind", stats.Info.JobKind,
		"unique", stats.Info.IsUnique,
		"status", stats.Info.Status,
		"ref_link", stats.Info.RefLink,
		"enqueue_time", stats.Info.EnqueueTime,
		"run_at", stats.Info.RunAt,
		"cron_spec", stats.Info.CronSpec,
		"web_hook_url", stats.Info.WebHookURL,
		"numeric_policy_id", stats.Info.NumericPID,
	)
	if stats.Info.CheckInAt > 0 && !utils.IsEmptyStr(stats.Info.CheckIn) {
		args = append(args,
			"check_in", stats.Info.CheckIn,
			"check_in_at", stats.Info.CheckInAt,
		)
	}
	if stats.Info.DieAt > 0 {
		args = append(args, "die_at", stats.Info.DieAt)
	}

	if !utils.IsEmptyStr(stats.Info.UpstreamJobID) {
		args = append(args, "upstream_job_id", stats.Info.UpstreamJobID)
	}

	if len(stats.Info.Parameters) > 0 {
		if bytes, err := json.Marshal(&stats.Info.Parameters); err == nil {
			args = append(args, "parameters", string(bytes))
		}
	}
	// Set update timestamp
	args = append(args, "update_time", time.Now().Unix())

	// Do it in a transaction
	err = conn.Send("MULTI")
	err = conn.Send("HMSET", args...)

	// If the job is a periodic job, no expire time should be set.
	// If the job is a scheduled job, the expire time should be extended by the time remaining until runAt.
	if stats.Info.JobKind != KindPeriodic {
		var expireTime int64 = statDataExpireTime
		if stats.Info.JobKind == KindScheduled {
			nowTime := time.Now().Unix()
			future := stats.Info.RunAt - nowTime
			if future > 0 {
				expireTime += future
			}
		}
		expireTime += rand.Int63n(15) // Avoid lots of keys being expired at the same time
		err = conn.Send("EXPIRE", key, expireTime)
	}

	// Link with its upstream job if upstream job ID exists for future querying
	if !utils.IsEmptyStr(stats.Info.UpstreamJobID) {
		k := rds.KeyUpstreamJobAndExecutions(bt.namespace, stats.Info.UpstreamJobID)
		zargs := []interface{}{k, "NX", stats.Info.RunAt, stats.Info.JobID}
		err = conn.Send("ZADD", zargs...)
	}

	// Check command send error only once here before executing
	if err != nil {
		return
	}

	_, err = conn.Do("EXEC")

	return
}
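
// A minimal redigo transaction sketch mirroring the Send/Do pattern above,
// assuming redigo's behavior of making connection errors sticky so a single
// error check before EXEC suffices; the key and fields are placeholders.
func exampleTxn(conn redis.Conn) error {
	err := conn.Send("MULTI")
	err = conn.Send("HMSET", "ns:job_stats:placeholder", "status", "Pending")
	err = conn.Send("EXPIRE", "ns:job_stats:placeholder", 3600)
	if err != nil {
		return err
	}

	_, err = conn.Do("EXEC")
	return err
}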

// UpdateStatusWithRetry updates the status with retry enabled.
// If the update fails, it is retried when permitted: best effort.
func (bt *basicTracker) UpdateStatusWithRetry(targetStatus Status) error {
	err := bt.compareAndSet(targetStatus)
	if err != nil {
		// Status mismatching error will be ignored
		if !errs.IsStatusMismatchError(err) {
			// Push to the retrying queue
			if er := bt.pushToQueueForRetry(targetStatus); er != nil {
				logger.Errorf("push job status update request to retry queue error: %s", er)
				// If pushing to the retrying queue failed, downgrade to retrying in the current process
				// by recursive calls in goroutines.
				bt.retryUpdateStatus(targetStatus)
			}
		}
	}

	return err
}

// Refresh the job stats in mem
func (bt *basicTracker) refresh(targetStatus Status, checkIn ...string) {
	now := time.Now().Unix()

	bt.jobStats.Info.Status = targetStatus.String()
	if len(checkIn) > 0 {
		bt.jobStats.Info.CheckIn = checkIn[0]
		bt.jobStats.Info.CheckInAt = now
	}
	bt.jobStats.Info.UpdateTime = now
}

// fireHookEvent fires the hook event
func (bt *basicTracker) fireHookEvent(status Status, checkIn ...string) error {
	// Check if hook URL is registered
	if utils.IsEmptyStr(bt.jobStats.Info.WebHookURL) {
		// Do nothing
		return nil
	}

	change := &StatusChange{
		JobID:    bt.jobID,
		Status:   status.String(),
		Metadata: bt.jobStats.Info,
	}

	if len(checkIn) > 0 {
		change.CheckIn = checkIn[0]
	}

	// If callback is registered, then trigger now
	if bt.callback != nil {
		return bt.callback(bt.jobStats.Info.WebHookURL, change)
	}

	return nil
}

func (bt *basicTracker) pushToQueueForRetry(targetStatus Status) error {
	simpleStatusChange := &SimpleStatusChange{
		JobID:        bt.jobID,
		TargetStatus: targetStatus.String(),
	}

	rawJSON, err := json.Marshal(simpleStatusChange)
	if err != nil {
		return err
	}

	conn := bt.pool.Get()
	defer func() {
		_ = conn.Close()
	}()

	key := rds.KeyStatusUpdateRetryQueue(bt.namespace)
	args := []interface{}{key, "NX", time.Now().Unix(), rawJSON}

	_, err = conn.Do("ZADD", args...)

	return err
}
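
// Each retry entry is the JSON form of SimpleStatusChange, scored by the
// enqueue timestamp; ZADD with NX keeps the earliest request for a given
// payload. A sketch of the stored shape, with placeholder key and values:
//
//	ZADD <ns>:status_update_retry_queue NX 1552000000 {"job_id":"<id>","target_status":"Running"}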

func (bt *basicTracker) retryUpdateStatus(targetStatus Status) {
	go func() {
		select {
		case <-time.After(time.Duration(5)*time.Minute + time.Duration(rand.Int31n(13))*time.Second):
			// Check the update timestamp
			if time.Now().Unix()-bt.jobStats.Info.UpdateTime < statDataExpireTime-24*3600 {
				if err := bt.compareAndSet(targetStatus); err != nil {
					logger.Errorf("Retry to update job status error: %s", err)
					bt.retryUpdateStatus(targetStatus)
				}
				// Success
			}
			return
		case <-bt.context.Done():
			return // terminated
		}
	}()
}

func (bt *basicTracker) compareAndSet(targetStatus Status) error {
	conn := bt.pool.Get()
	defer func() {
		_ = conn.Close()
	}()

	rootKey := rds.KeyJobStats(bt.namespace, bt.jobID)

	st, err := getStatus(conn, rootKey)
	if err != nil {
		return err
	}

	diff := st.Compare(targetStatus)
	if diff > 0 {
		return errs.StatusMismatchError(st.String(), targetStatus.String())
	}
	if diff == 0 {
		// Desired matches actual
		return nil
	}

	return setStatus(conn, rootKey, targetStatus)
}
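
// The comparison above assumes Status defines an ordering from the pending
// and running statuses up to the final ones, with Compare returning a
// negative, zero or positive value much like strings.Compare. Under that
// assumption, a stored status that compares greater than the target is
// already "ahead", so compareAndSet never moves a job backwards; this is
// what makes late or retried status updates safe to reject.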

// retrieve the stats of the job tracked by this tracker from the backend data
func (bt *basicTracker) retrieve() error {
	conn := bt.pool.Get()
	defer func() {
		_ = conn.Close()
	}()

	key := rds.KeyJobStats(bt.namespace, bt.jobID)
	vals, err := redis.Strings(conn.Do("HGETALL", key))
	if err != nil {
		return err
	}

	if len(vals) == 0 {
		return errs.NoObjectFoundError(bt.jobID)
	}

	res := &Stats{
		Info: &StatsInfo{},
	}

	for i, l := 0, len(vals); i < l; i = i + 2 {
		prop := vals[i]
		value := vals[i+1]
		switch prop {
		case "id":
			res.Info.JobID = value
		case "name":
			res.Info.JobName = value
		case "kind":
			res.Info.JobKind = value
		case "unique":
			v, err := strconv.ParseBool(value)
			if err != nil {
				v = false
			}
			res.Info.IsUnique = v
		case "status":
			res.Info.Status = value
		case "ref_link":
			res.Info.RefLink = value
		case "enqueue_time":
			v, _ := strconv.ParseInt(value, 10, 64)
			res.Info.EnqueueTime = v
		case "update_time":
			v, _ := strconv.ParseInt(value, 10, 64)
			res.Info.UpdateTime = v
		case "run_at":
			v, _ := strconv.ParseInt(value, 10, 64)
			res.Info.RunAt = v
		case "check_in_at":
			v, _ := strconv.ParseInt(value, 10, 64)
			res.Info.CheckInAt = v
		case "check_in":
			res.Info.CheckIn = value
		case "cron_spec":
			res.Info.CronSpec = value
		case "web_hook_url":
			res.Info.WebHookURL = value
		case "die_at":
			v, _ := strconv.ParseInt(value, 10, 64)
			res.Info.DieAt = v
		case "upstream_job_id":
			res.Info.UpstreamJobID = value
		case "numeric_policy_id":
			v, _ := strconv.ParseInt(value, 10, 64)
			res.Info.NumericPID = v
		case "parameters":
			params := make(Parameters)
			if err := json.Unmarshal([]byte(value), &params); err == nil {
				res.Info.Parameters = params
			}
		default:
		}
	}

	bt.jobStats = res

	return nil
}

func getStatus(conn redis.Conn, key string) (Status, error) {
	values, err := rds.HmGet(conn, key, "status")
	if err != nil {
		return "", err
	}

	if len(values) == 1 {
		st := Status(values[0].([]byte))
		if st.Validate() == nil {
			return st, nil
		}
	}

	return "", errors.New("malformed status data returned")
}

func setStatus(conn redis.Conn, key string, status Status) error {
	return rds.HmSet(conn, key, "status", status.String(), "update_time", time.Now().Unix())
}

// queryExecutions queries periodic executions by status
func queryExecutions(conn redis.Conn, dataKey string, q *query.Parameter) ([]string, int64, error) {
	total, err := redis.Int64(conn.Do("ZCOUNT", dataKey, 0, "+inf"))
	if err != nil {
		return nil, 0, err
	}

	var pageNumber, pageSize uint = 1, query.DefaultPageSize
	if q.PageNumber > 0 {
		pageNumber = q.PageNumber
	}
	if q.PageSize > 0 {
		pageSize = q.PageSize
	}

	results := make([]string, 0)
	if total == 0 || (int64)((pageNumber-1)*pageSize) >= total {
		return results, total, nil
	}

	offset := (pageNumber - 1) * pageSize
	args := []interface{}{dataKey, "+inf", 0, "LIMIT", offset, pageSize}

	eIDs, err := redis.Values(conn.Do("ZREVRANGEBYSCORE", args...))
	if err != nil {
		return nil, 0, err
	}

	for _, eID := range eIDs {
		if eIDBytes, ok := eID.([]byte); ok {
			results = append(results, string(eIDBytes))
		}
	}

	return results, total, nil
}
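
// A sketch of the underlying index assumed by the two query paths above:
// execution IDs live in a sorted set scored by their run_at timestamps (see
// Save), so both rank-based and score-based reads walk newest-first. The key
// and values are placeholders:
//
//	ZADD <ns>:<upstreamJobID>:executions NX <runAt> <executionID>
//	ZCOUNT <ns>:<upstreamJobID>:executions 0 +inf
//	ZREVRANGEBYSCORE <ns>:<upstreamJobID>:executions +inf 0 LIMIT <offset> <pageSize>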
218
src/jobservice/job/tracker_test.go
Normal file
@ -0,0 +1,218 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package job

import (
	"context"
	"testing"
	"time"

	"github.com/goharbor/harbor/src/jobservice/common/query"

	"github.com/goharbor/harbor/src/jobservice/common/utils"
	"github.com/goharbor/harbor/src/jobservice/tests"
	"github.com/gomodule/redigo/redis"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"
)

// TrackerTestSuite tests functions of tracker
type TrackerTestSuite struct {
	suite.Suite

	namespace string
	pool      *redis.Pool
}

// TestTrackerTestSuite is entry of go test
func TestTrackerTestSuite(t *testing.T) {
	suite.Run(t, new(TrackerTestSuite))
}

// SetupSuite prepares test suite
func (suite *TrackerTestSuite) SetupSuite() {
	suite.namespace = tests.GiveMeTestNamespace()
	suite.pool = tests.GiveMeRedisPool()
}

// TearDownSuite clears the test suite
func (suite *TrackerTestSuite) TearDownSuite() {
	conn := suite.pool.Get()
	defer func() {
		_ = conn.Close()
	}()

	_ = tests.ClearAll(suite.namespace, conn)
}

// TestTracker tests tracker
func (suite *TrackerTestSuite) TestTracker() {
	jobID := utils.MakeIdentifier()
	mockJobStats := &Stats{
		Info: &StatsInfo{
			JobID:    jobID,
			Status:   SuccessStatus.String(),
			JobKind:  KindGeneric,
			JobName:  SampleJob,
			IsUnique: false,
		},
	}

	tracker := NewBasicTrackerWithStats(
		context.TODO(),
		mockJobStats,
		suite.namespace,
		suite.pool,
		func(hookURL string, change *StatusChange) error {
			return nil
		},
	)

	err := tracker.Save()
	require.Nil(suite.T(), err, "save: nil error expected but got %s", err)

	s, err := tracker.Status()
	assert.Nil(suite.T(), err, "get status: nil error expected but got %s", err)
	assert.Equal(suite.T(), SuccessStatus, s, "get status: expected success but got %s", s)

	j := tracker.Job()
	assert.Equal(suite.T(), jobID, j.Info.JobID, "job: expect job ID %s but got %s", jobID, j.Info.JobID)

	err = tracker.Update("web_hook_url", "http://hook.url")
	assert.Nil(suite.T(), err, "update: nil error expected but got %s", err)

	err = tracker.Load()
	assert.Nil(suite.T(), err, "load: nil error expected but got %s", err)
	assert.Equal(
		suite.T(),
		"http://hook.url",
		tracker.Job().Info.WebHookURL,
		"web hook: expect %s but got %s",
		"http://hook.url",
		tracker.Job().Info.WebHookURL,
	)

	err = tracker.Run()
	assert.Error(suite.T(), err, "run: non nil error expected but got nil")
	err = tracker.CheckIn("check in")
	assert.Nil(suite.T(), err, "check in: nil error expected but got %s", err)
	err = tracker.Succeed()
	assert.Nil(suite.T(), err, "succeed: nil error expected but got %s", err)
	err = tracker.Stop()
	assert.Nil(suite.T(), err, "stop: nil error expected but got %s", err)
	err = tracker.Fail()
	assert.Nil(suite.T(), err, "fail: nil error expected but got %s", err)

	t := NewBasicTrackerWithID(
		context.TODO(),
		jobID,
		suite.namespace,
		suite.pool,
		func(hookURL string, change *StatusChange) error {
			return nil
		},
	)
	err = t.Load()
	assert.NoError(suite.T(), err)

	err = t.Expire()
	assert.NoError(suite.T(), err)
}

// TestPeriodicTracker tests the tracker of a periodic job
func (suite *TrackerTestSuite) TestPeriodicTracker() {
	jobID := utils.MakeIdentifier()
	nID := time.Now().Unix()
	mockJobStats := &Stats{
		Info: &StatsInfo{
			JobID:      jobID,
			Status:     ScheduledStatus.String(),
			JobKind:    KindPeriodic,
			JobName:    SampleJob,
			IsUnique:   false,
			CronSpec:   "0 0 * * * *",
			NumericPID: nID,
		},
	}

	t := NewBasicTrackerWithStats(context.TODO(), mockJobStats, suite.namespace, suite.pool, nil)
	err := t.Save()
	require.NoError(suite.T(), err)

	executionID := utils.MakeIdentifier()
	runAt := time.Now().Add(1 * time.Hour).Unix()
	executionStats := &Stats{
		Info: &StatsInfo{
			JobID:         executionID,
			Status:        ScheduledStatus.String(),
			JobKind:       KindScheduled,
			JobName:       SampleJob,
			IsUnique:      false,
			CronSpec:      "0 0 * * * *",
			RunAt:         runAt,
			EnqueueTime:   runAt,
			UpstreamJobID: jobID,
		},
	}

	t2 := NewBasicTrackerWithStats(context.TODO(), executionStats, suite.namespace, suite.pool, nil)
	err = t2.Save()
	require.NoError(suite.T(), err)

	id, err := t.NumericID()
	require.NoError(suite.T(), err)
	assert.Equal(suite.T(), nID, id)

	_, total, err := t.Executions(&query.Parameter{
		PageNumber: 1,
		PageSize:   10,
		Extras:     make(query.ExtraParameters),
	})
	require.NoError(suite.T(), err)
	assert.Equal(suite.T(), int64(1), total)

	err = t2.PeriodicExecutionDone()
	require.NoError(suite.T(), err)
}

// TestPushForRetry tests push for retry
func (suite *TrackerTestSuite) TestPushForRetry() {
	ID := utils.MakeIdentifier()
	runAt := time.Now().Add(1 * time.Hour).Unix()
	jobStats := &Stats{
		Info: &StatsInfo{
			JobID:       ID,
			Status:      ScheduledStatus.String(),
			JobKind:     KindScheduled,
			JobName:     SampleJob,
			IsUnique:    false,
			RunAt:       runAt,
			EnqueueTime: runAt,
		},
	}

	t := &basicTracker{
		namespace: suite.namespace,
		context:   context.TODO(),
		pool:      suite.pool,
		jobID:     ID,
		jobStats:  jobStats,
		callback:  nil,
	}

	err := t.pushToQueueForRetry(RunningStatus)
	require.NoError(suite.T(), err)
}
@ -12,11 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

package impl
package job

// Define the register name constants of known jobs

const (
	// KnownJobDemo is name of demo job
	KnownJobDemo = "DEMO"
)
// HookCallback defines a callback to trigger when hook events happen
type HookCallback func(hookURL string, change *StatusChange) error
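
// A HookCallback is just a function, so any transport can back it. A minimal
// sketch of an HTTP-posting callback, assuming the receiver accepts the JSON
// form of StatusChange; the imports (bytes, encoding/json, fmt, net/http)
// and the client settings are illustrative only.
func exampleHookCallback(hookURL string, change *StatusChange) error {
	data, err := json.Marshal(change)
	if err != nil {
		return err
	}

	res, err := http.Post(hookURL, "application/json", bytes.NewReader(data))
	if err != nil {
		return err
	}
	defer res.Body.Close()

	if res.StatusCode != http.StatusOK {
		return fmt.Errorf("hook returned status %d", res.StatusCode)
	}

	return nil
}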

179
src/jobservice/lcm/controller.go
Normal file
@ -0,0 +1,179 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package lcm

import (
	"context"
	"encoding/json"
	"sync"
	"time"

	"github.com/goharbor/harbor/src/jobservice/common/rds"
	"github.com/goharbor/harbor/src/jobservice/env"
	"github.com/goharbor/harbor/src/jobservice/job"
	"github.com/goharbor/harbor/src/jobservice/logger"
	"github.com/gomodule/redigo/redis"
	"github.com/pkg/errors"
)

const (
	// Wait a short while if any errors occurred
	shortLoopInterval = 5 * time.Second
	// Wait a long while if no retrying elements are found
	longLoopInterval = 5 * time.Minute
)

// Controller is designed to control the life cycle of the job
type Controller interface {
	// Serve runs the daemon process if needed
	Serve() error

	// New tracker from the newly provided stats
	New(stats *job.Stats) (job.Tracker, error)

	// Track the life cycle of the specified existing job
	Track(jobID string) (job.Tracker, error)
}

// basicController is the default implementation of Controller based on redis
type basicController struct {
	context   context.Context
	namespace string
	pool      *redis.Pool
	callback  job.HookCallback
	wg        *sync.WaitGroup
}

// NewController is the constructor of the basic controller
func NewController(ctx *env.Context, ns string, pool *redis.Pool, callback job.HookCallback) Controller {
	return &basicController{
		context:   ctx.SystemContext,
		namespace: ns,
		pool:      pool,
		callback:  callback,
		wg:        ctx.WG,
	}
}

// Serve ...
func (bc *basicController) Serve() error {
	bc.wg.Add(1)
	go bc.loopForRestoreDeadStatus()

	logger.Info("Status restoring loop is started")

	return nil
}

// New tracker
func (bc *basicController) New(stats *job.Stats) (job.Tracker, error) {
	if stats == nil {
		return nil, errors.New("nil stats when creating job tracker")
	}

	if err := stats.Validate(); err != nil {
		return nil, errors.Errorf("error occurred when creating job tracker: %s", err)
	}

	bt := job.NewBasicTrackerWithStats(bc.context, stats, bc.namespace, bc.pool, bc.callback)
	if err := bt.Save(); err != nil {
		return nil, err
	}

	return bt, nil
}

// Track and attach to the job
func (bc *basicController) Track(jobID string) (job.Tracker, error) {
	bt := job.NewBasicTrackerWithID(bc.context, jobID, bc.namespace, bc.pool, bc.callback)
	if err := bt.Load(); err != nil {
		return nil, err
	}

	return bt, nil
}
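
// A usage sketch of the controller, assuming an env.Context and redis pool
// wired up as in NewController; the IDs and the hook callback here are
// hypothetical.
//
//	ctl := NewController(envCtx, namespace, pool, hookCallback)
//	_ = ctl.Serve() // start the dead-status restoring loop
//
//	t, err := ctl.New(stats)   // save stats and get a tracker for a new job
//	t, err = ctl.Track(jobID)  // or load the tracker of an existing job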

// loopForRestoreDeadStatus is a loop to restore the dead statuses of jobs
func (bc *basicController) loopForRestoreDeadStatus() {
	defer func() {
		logger.Info("Status restoring loop is stopped")
		bc.wg.Done()
	}()

	token := make(chan bool, 1)
	token <- true

	for {
		<-token

		if err := bc.restoreDeadStatus(); err != nil {
			waitInterval := shortLoopInterval
			if err == rds.ErrNoElements {
				// No elements
				waitInterval = longLoopInterval
			} else {
				logger.Errorf("restore dead status error: %s, put it back to the retrying Q later again", err)
			}

			// wait for a while or be terminated
			select {
			case <-time.After(waitInterval):
			case <-bc.context.Done():
				return
			}
		}

		// Return token
		token <- true
	}
}
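
// The single-token channel above serializes the loop: the token is taken
// before each restore attempt and only returned after any backoff wait, so
// at most one restoreDeadStatus call is in flight while the loop remains
// cancellable through the context.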

// restoreDeadStatus tries to restore the dead status
func (bc *basicController) restoreDeadStatus() error {
	// Get one
	deadOne, err := bc.popOneDead()
	if err != nil {
		return err
	}
	// Try to update status
	t, err := bc.Track(deadOne.JobID)
	if err != nil {
		return err
	}

	return t.UpdateStatusWithRetry(job.Status(deadOne.TargetStatus))
}

// popOneDead retrieves one dead status from the backend Q, lowest score first
func (bc *basicController) popOneDead() (*job.SimpleStatusChange, error) {
	conn := bc.pool.Get()
	defer func() {
		_ = conn.Close()
	}()

	key := rds.KeyStatusUpdateRetryQueue(bc.namespace)
	v, err := rds.ZPopMin(conn, key)
	if err != nil {
		return nil, err
	}

	if bytes, ok := v.([]byte); ok {
		ssc := &job.SimpleStatusChange{}
		if err := json.Unmarshal(bytes, ssc); err == nil {
			return ssc, nil
		}
	}

	return nil, errors.New("pop one dead error: bad result reply")
}
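
// rds.ZPopMin is assumed to atomically pop the member with the lowest score;
// since the retry queue is scored by enqueue timestamp, the oldest pending
// status change is restored first. A sketch of an equivalent Redis sequence:
//
//	MULTI
//	ZRANGE <key> 0 0            -- peek the oldest member
//	ZREMRANGEBYRANK <key> 0 0   -- remove it
//	EXEC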

123
src/jobservice/lcm/controller_test.go
Normal file
@ -0,0 +1,123 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package lcm

import (
	"context"
	"encoding/json"
	"sync"
	"testing"
	"time"

	"github.com/goharbor/harbor/src/jobservice/common/rds"
	"github.com/goharbor/harbor/src/jobservice/common/utils"
	"github.com/goharbor/harbor/src/jobservice/env"
	"github.com/goharbor/harbor/src/jobservice/job"
	"github.com/goharbor/harbor/src/jobservice/tests"
	"github.com/gomodule/redigo/redis"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"
)

// LcmControllerTestSuite tests functions of the life cycle controller
type LcmControllerTestSuite struct {
	suite.Suite

	namespace string
	pool      *redis.Pool
	ctl       Controller
	cancel    context.CancelFunc
}

// SetupSuite prepares test suite
func (suite *LcmControllerTestSuite) SetupSuite() {
	suite.namespace = tests.GiveMeTestNamespace()
	suite.pool = tests.GiveMeRedisPool()

	ctx, cancel := context.WithCancel(context.Background())
	suite.cancel = cancel
	envCtx := &env.Context{
		SystemContext: ctx,
		WG:            new(sync.WaitGroup),
	}
	suite.ctl = NewController(envCtx, suite.namespace, suite.pool, func(hookURL string, change *job.StatusChange) error { return nil })
}

// TearDownSuite clears test suite
func (suite *LcmControllerTestSuite) TearDownSuite() {
	suite.cancel()
}

// TestLcmControllerTestSuite is entry of go test
func TestLcmControllerTestSuite(t *testing.T) {
	suite.Run(t, new(LcmControllerTestSuite))
}

// TestNewAndTrack tests controller.New() and controller.Track()
func (suite *LcmControllerTestSuite) TestNewAndTrack() {
	jobID := utils.MakeIdentifier()
	suite.newsStats(jobID)

	t, err := suite.ctl.Track(jobID)
	require.Nil(suite.T(), err, "lcm track: nil error expected but got %s", err)
	assert.Equal(suite.T(), job.SampleJob, t.Job().Info.JobName, "lcm new: expect job name %s but got %s", job.SampleJob, t.Job().Info.JobName)
}

// TestServe tests controller.Serve()
func (suite *LcmControllerTestSuite) TestServe() {
	// Prepare mock data
	jobID := utils.MakeIdentifier()
	suite.newsStats(jobID)

	conn := suite.pool.Get()
	defer func() {
		_ = conn.Close()
	}()
	simpleChange := &job.SimpleStatusChange{
		JobID:        jobID,
		TargetStatus: job.RunningStatus.String(),
	}
	rawJSON, err := json.Marshal(simpleChange)
	require.Nil(suite.T(), err, "json marshal: nil error expected but got %s", err)
	key := rds.KeyStatusUpdateRetryQueue(suite.namespace)
	args := []interface{}{key, "NX", time.Now().Unix(), rawJSON}
	_, err = conn.Do("ZADD", args...)
	require.Nil(suite.T(), err, "prepare mock data: nil error expected but got %s", err)

	err = suite.ctl.Serve()
	require.NoError(suite.T(), err, "lcm: nil error expected but got %s", err)
	<-time.After(1 * time.Second)

	count, err := redis.Int(conn.Do("ZCARD", key))
	require.Nil(suite.T(), err, "get total dead status: nil error expected but got %s", err)
	assert.Equal(suite.T(), 0, count)
}

// newsStats creates job stats
func (suite *LcmControllerTestSuite) newsStats(jobID string) {
	stats := &job.Stats{
		Info: &job.StatsInfo{
			JobID:    jobID,
			JobKind:  job.KindGeneric,
			JobName:  job.SampleJob,
			IsUnique: true,
			Status:   job.PendingStatus.String(),
		},
	}

	t, err := suite.ctl.New(stats)
	require.Nil(suite.T(), err, "lcm new: nil error expected but got %s", err)
	assert.Equal(suite.T(), jobID, t.Job().Info.JobID, "lcm new: expect job ID %s but got %s", jobID, t.Job().Info.JobID)
}

@ -49,14 +49,15 @@ func TestDBLogger(t *testing.T) {
	l.Warningf("JobLog Warningf: %s", "TestDBLogger")
	l.Errorf("JobLog Errorf: %s", "TestDBLogger")

	l.Close()
	_ = l.Close()

	dbGetter := getter.NewDBGetter()
	ll, err := dbGetter.Retrieve(uuid)
	require.Nil(t, err)
	log.Infof("get logger %s", ll)

	sweeper.PrepareDBSweep()
	err = sweeper.PrepareDBSweep()
	require.NoError(t, err)
	dbSweeper := sweeper.NewDBSweeper(-1)
	count, err := dbSweeper.Sweep()
	require.Nil(t, err)
@ -38,12 +38,12 @@ func GetLogger(loggerOptions ...Option) (Interface, error) {

	// No options specified, enable std as default
	if len(loggerOptions) == 0 {
		defaultOp := BackendOption(LoggerNameStdOutput, "", nil)
		defaultOp := BackendOption(NameStdOutput, "", nil)
		defaultOp.Apply(lOptions)
	}

	// Create backends
	loggers := []Interface{}
	loggers := make([]Interface, 0)
	for name, ops := range lOptions.values {
		if !IsKnownLogger(name) {
			return nil, fmt.Errorf("no logger registered for name '%s'", name)
@ -105,7 +105,7 @@ func GetSweeper(context context.Context, sweeperOptions ...Option) (sweeper.Inte
		op.Apply(sOptions)
	}

	sweepers := []sweeper.Interface{}
	sweepers := make([]sweeper.Interface, 0)
	for name, ops := range sOptions.values {
		if !HasSweeper(name) {
			return nil, fmt.Errorf("no sweeper provided for the logger %s", name)
@ -147,7 +147,7 @@ func GetLogDataGetter(loggerOptions ...Option) (getter.Interface, error) {
	}

	// Iterate with specified order
	keys := []string{}
	keys := make([]string, 0)
	for k := range lOptions.values {
		keys = append(keys, k)
	}
@ -175,14 +175,14 @@ func GetLogDataGetter(loggerOptions ...Option) (getter.Interface, error) {
// Init the loggers and sweepers
func Init(ctx context.Context) error {
	// For loggers
	options := []Option{}
	options := make([]Option, 0)
	// For sweepers
	sOptions := []Option{}
	sOptions := make([]Option, 0)

	for _, lc := range config.DefaultConfig.LoggerConfigs {
		// Inject logger depth here for FILE and STD logger to avoid configuring it in the yaml
		// For logger of job service itself, the depth should be 6
		if lc.Name == LoggerNameFile || lc.Name == LoggerNameStdOutput {
		if lc.Name == NameFile || lc.Name == NameStdOutput {
			if lc.Settings == nil {
				lc.Settings = map[string]interface{}{}
			}
@ -202,7 +202,7 @@ func Init(ctx context.Context) error {
	// Avoid data race issue
	singletons.Store(systemKeyServiceLogger, lg)

	jOptions := []Option{}
	jOptions := make([]Option, 0)
	// Append configured sweepers in job loggers if existing
	for _, lc := range config.DefaultConfig.JobLoggerConfigs {
		jOptions = append(jOptions, BackendOption(lc.Name, lc.Level, lc.Settings))
@ -224,12 +224,12 @@ func Init(ctx context.Context) error {
	// If sweepers configured
	if len(sOptions) > 0 {
		// Get the sweeper controller
		sweeper, err := GetSweeper(ctx, sOptions...)
		swp, err := GetSweeper(ctx, sOptions...)
		if err != nil {
			return fmt.Errorf("create logger sweeper error: %s", err)
		}
		// Start sweep loop
		_, err = sweeper.Sweep()
		_, err = swp.Sweep()
		if err != nil {
			return fmt.Errorf("start logger sweeper error: %s", err)
		}

@ -74,7 +74,7 @@ func TestGetLoggersMulti(t *testing.T) {
		}
	}()

	ops := []Option{}
	ops := make([]Option, 0)
	ops = append(
		ops,
		BackendOption("STD_OUTPUT", "DEBUG", nil),

@ -17,7 +17,7 @@ func TestFileFactory(t *testing.T) {
	require.Nil(t, err)

	if closer, ok := ff.(Closer); ok {
		closer.Close()
		_ = closer.Close()
	}
}

@ -39,14 +39,16 @@ func TestDBGetter(t *testing.T) {
	require.Nil(t, err)

	l.Debug("JobLog Debug: TestDBLoggerGetter")
	l.Close()
	err = l.Close()
	require.NoError(t, err)

	dbGetter := NewDBGetter()
	ll, err := dbGetter.Retrieve(uuid)
	require.Nil(t, err)
	log.Infof("get logger %s", ll)

	sweeper.PrepareDBSweep()
	err = sweeper.PrepareDBSweep()
	require.NoError(t, err)
	dbSweeper := sweeper.NewDBSweeper(-1)
	count, err := dbSweeper.Sweep()
	require.Nil(t, err)
@ -60,7 +62,8 @@ func TestDBGetterError(t *testing.T) {
	require.Nil(t, err)

	l.Debug("JobLog Debug: TestDBLoggerGetter")
	l.Close()
	err = l.Close()
	require.NoError(t, err)

	dbGetter := NewDBGetter()
	_, err = dbGetter.Retrieve("")
@ -68,7 +71,8 @@ func TestDBGetterError(t *testing.T) {
	_, err = dbGetter.Retrieve("not_exist_uuid")
	require.NotNil(t, err)

	sweeper.PrepareDBSweep()
	err = sweeper.PrepareDBSweep()
	require.NoError(t, err)
	dbSweeper := sweeper.NewDBSweeper(-1)
	count, err := dbSweeper.Sweep()
	require.Nil(t, err)

@ -8,7 +8,7 @@ import (

	"github.com/goharbor/harbor/src/jobservice/errs"

	"github.com/goharbor/harbor/src/jobservice/utils"
	"github.com/goharbor/harbor/src/jobservice/common/utils"
)

// FileGetter is responsible for retrieving file log data

@ -7,12 +7,12 @@ import (
)

const (
	// LoggerNameFile is the unique name of the file logger.
	LoggerNameFile = "FILE"
	// LoggerNameStdOutput is the unique name of the std logger.
	LoggerNameStdOutput = "STD_OUTPUT"
	// LoggerNameDB is the unique name of the DB logger.
	LoggerNameDB = "DB"
	// NameFile is the unique name of the file logger.
	NameFile = "FILE"
	// NameStdOutput is the unique name of the std logger.
	NameStdOutput = "STD_OUTPUT"
	// NameDB is the unique name of the DB logger.
	NameDB = "DB"
)

// Declaration is used to declare a supported logger.
@ -31,11 +31,11 @@ type Declaration struct {
// log info.
var knownLoggers = map[string]*Declaration{
	// File logger
	LoggerNameFile: {FileFactory, FileSweeperFactory, FileGetterFactory, false},
	NameFile: {FileFactory, FileSweeperFactory, FileGetterFactory, false},
	// STD output(both stdout and stderr) logger
	LoggerNameStdOutput: {StdFactory, nil, nil, true},
	NameStdOutput: {StdFactory, nil, nil, true},
	// DB logger
	LoggerNameDB: {DBFactory, DBSweeperFactory, DBGetterFactory, false},
	NameDB: {DBFactory, DBSweeperFactory, DBGetterFactory, false},
}

// IsKnownLogger checks if the logger is supported with name.
@ -97,11 +97,11 @@ func GetLoggerName(l Interface) string {

	switch l.(type) {
	case *backend.DBLogger:
		name = LoggerNameDB
		name = NameDB
	case *backend.StdOutputLogger:
		name = LoggerNameStdOutput
		name = NameStdOutput
	case *backend.FileLogger:
		name = LoggerNameFile
		name = NameFile
	default:
		name = reflect.TypeOf(l).String()
	}

@ -13,28 +13,28 @@ func TestKnownLoggers(t *testing.T) {
	b := IsKnownLogger("Unknown")
	require.False(t, b)

	b = IsKnownLogger(LoggerNameFile)
	b = IsKnownLogger(NameFile)
	require.True(t, b)

	// no getter
	b = HasGetter(LoggerNameStdOutput)
	b = HasGetter(NameStdOutput)
	require.False(t, b)
	// has getter
	b = HasGetter(LoggerNameDB)
	b = HasGetter(NameDB)
	require.True(t, b)

	// no sweeper
	b = HasSweeper(LoggerNameStdOutput)
	b = HasSweeper(NameStdOutput)
	require.False(t, b)
	// has sweeper
	b = HasSweeper(LoggerNameDB)
	b = HasSweeper(NameDB)
	require.True(t, b)

	// unknown logger
	l := KnownLoggers("unknown")
	require.Nil(t, l)
	// known logger
	l = KnownLoggers(LoggerNameDB)
	l = KnownLoggers(NameDB)
	require.NotNil(t, l)

	// unknown level
@ -52,14 +52,14 @@ func TestGetLoggerName(t *testing.T) {
	uuid := "uuid_for_unit_test"
	l, err := backend.NewDBLogger(uuid, "DEBUG", 4)
	require.Nil(t, err)
	require.Equal(t, LoggerNameDB, GetLoggerName(l))
	require.Equal(t, NameDB, GetLoggerName(l))

	stdLog := backend.NewStdOutputLogger("DEBUG", backend.StdErr, 4)
	require.Equal(t, LoggerNameStdOutput, GetLoggerName(stdLog))
	require.Equal(t, NameStdOutput, GetLoggerName(stdLog))

	fileLog, err := backend.NewFileLogger("DEBUG", path.Join(os.TempDir(), "TestFileLogger.log"), 4)
	require.Nil(t, err)
	require.Equal(t, LoggerNameFile, GetLoggerName(fileLog))
	require.Equal(t, NameFile, GetLoggerName(fileLog))

	e := &Entry{}
	n := GetLoggerName(e)

@ -15,9 +15,3 @@ func Retrieve(logID string) ([]byte, error) {

	return val.(getter.Interface).Retrieve(logID)
}

// HasLogGetterConfigured checks if a log data getter is there for using
func HasLogGetterConfigured() bool {
	_, ok := singletons.Load(systemKeyLogDataGetter)
	return ok
}

50
src/jobservice/logger/log_data_handler_test.go
Normal file
@ -0,0 +1,50 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package logger

import (
	"context"
	"os"
	"testing"

	"github.com/goharbor/harbor/src/jobservice/config"
	"github.com/stretchr/testify/require"
)

func TestRetrieve(t *testing.T) {
	config.DefaultConfig.JobLoggerConfigs = []*config.LoggerConfig{
		{
			Name:  "STD_OUTPUT",
			Level: "DEBUG",
		},
		{
			Name:  "FILE",
			Level: "INFO",
			Settings: map[string]interface{}{
				"base_dir": os.TempDir(),
			},
			Sweeper: &config.LogSweeperConfig{
				Duration: 1,
				Settings: map[string]interface{}{
					"work_dir": os.TempDir(),
				},
			},
		},
	}

	err := Init(context.TODO())
	require.NoError(t, err)
	_, err = Retrieve("no_id")
	require.Error(t, err)
}

@ -38,9 +38,11 @@ func TestDBGetter(t *testing.T) {
	require.Nil(t, err)

	l.Debug("JobLog Debug: TestDBLoggerSweeper")
	l.Close()
	err = l.Close()
	require.NoError(t, err)

	PrepareDBSweep()
	err = PrepareDBSweep()
	require.NoError(t, err)
	dbSweeper := NewDBSweeper(-1)
	count, err := dbSweeper.Sweep()
	require.Nil(t, err)

@ -16,19 +16,19 @@ package main

import (
	"context"
	"errors"
	"flag"
	"fmt"

	"github.com/goharbor/harbor/src/common"
	comcfg "github.com/goharbor/harbor/src/common/config"
	"github.com/goharbor/harbor/src/jobservice/config"
	"github.com/goharbor/harbor/src/jobservice/env"
	"github.com/goharbor/harbor/src/jobservice/job"
	"github.com/goharbor/harbor/src/jobservice/job/impl"
	"github.com/pkg/errors"
	"os"

	"github.com/goharbor/harbor/src/jobservice/common/utils"
	"github.com/goharbor/harbor/src/jobservice/config"
	"github.com/goharbor/harbor/src/jobservice/logger"
	"github.com/goharbor/harbor/src/jobservice/runtime"
	"github.com/goharbor/harbor/src/jobservice/utils"
	"os"
)

func main() {
@ -47,8 +47,10 @@ func main() {
		panic(fmt.Sprintf("load configurations error: %s\n", err))
	}

	// Append node ID
	vCtx := context.WithValue(context.Background(), utils.NodeID, utils.GenerateNodeID())
	// Create the root context
	ctx, cancel := context.WithCancel(context.Background())
	ctx, cancel := context.WithCancel(vCtx)
	defer cancel()

	// Initialize logger
@ -57,7 +59,7 @@ func main() {
	}

	// Set job context initializer
	runtime.JobService.SetJobContextInitializer(func(ctx *env.Context) (env.JobContext, error) {
	runtime.JobService.SetJobContextInitializer(func(ctx context.Context) (job.Context, error) {
		secret := config.GetAuthSecret()
		if utils.IsEmptyStr(secret) {
			return nil, errors.New("empty auth secret")
@ -65,7 +67,7 @@ func main() {
		coreURL := os.Getenv("CORE_URL")
		configURL := coreURL + common.CoreConfigPath
		cfgMgr := comcfg.NewRESTCfgManager(configURL, secret)
		jobCtx := impl.NewContext(ctx.SystemContext, cfgMgr)
		jobCtx := impl.NewContext(ctx, cfgMgr)

		if err := jobCtx.Init(); err != nil {
			return nil, err
@ -75,5 +77,7 @@ func main() {
	})

	// Start
	runtime.JobService.LoadAndRun(ctx, cancel)
	if err := runtime.JobService.LoadAndRun(ctx, cancel); err != nil {
		logger.Fatal(err)
	}
}

@ -1,99 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package models

// Parameters for job execution.
type Parameters map[string]interface{}

// JobRequest is the request of launching a job.
type JobRequest struct {
	Job *JobData `json:"job"`
}

// JobData keeps the basic info.
type JobData struct {
	Name       string       `json:"name"`
	Parameters Parameters   `json:"parameters"`
	Metadata   *JobMetadata `json:"metadata"`
	StatusHook string       `json:"status_hook"`
}

// JobMetadata stores the metadata of job.
type JobMetadata struct {
	JobKind       string `json:"kind"`
	ScheduleDelay uint64 `json:"schedule_delay,omitempty"`
	Cron          string `json:"cron_spec,omitempty"`
	IsUnique      bool   `json:"unique"`
}

// JobStats keeps the result of job launching.
type JobStats struct {
	Stats *JobStatData `json:"job"`
}

// JobStatData keeps the stats of job
type JobStatData struct {
	JobID                string   `json:"id"`
	Status               string   `json:"status"`
	JobName              string   `json:"name"`
	JobKind              string   `json:"kind"`
	IsUnique             bool     `json:"unique"`
	RefLink              string   `json:"ref_link,omitempty"`
	CronSpec             string   `json:"cron_spec,omitempty"`
	EnqueueTime          int64    `json:"enqueue_time"`
	UpdateTime           int64    `json:"update_time"`
	RunAt                int64    `json:"run_at,omitempty"`
	CheckIn              string   `json:"check_in,omitempty"`
	CheckInAt            int64    `json:"check_in_at,omitempty"`
	DieAt                int64    `json:"die_at,omitempty"`
	HookStatus           string   `json:"hook_status,omitempty"`
	Executions           []string `json:"executions,omitempty"` // For the jobs like periodic jobs, which may execute multiple times
	UpstreamJobID        string   `json:"upstream_job_id,omitempty"` // Ref the upstream job if existing
	IsMultipleExecutions bool     `json:"multiple_executions"` // Indicate if the job has subsequent executions
}

// JobPoolStats represents the health and status of all the running worker pools.
type JobPoolStats struct {
	Pools []*JobPoolStatsData `json:"worker_pools"`
}

// JobPoolStatsData represents the health and status of a worker pool.
type JobPoolStatsData struct {
	WorkerPoolID string   `json:"worker_pool_id"`
	StartedAt    int64    `json:"started_at"`
	HeartbeatAt  int64    `json:"heartbeat_at"`
	JobNames     []string `json:"job_names"`
	Concurrency  uint     `json:"concurrency"`
	Status       string   `json:"status"`
}

// JobActionRequest defines the request for triggering a job action like stop/cancel.
type JobActionRequest struct {
	Action string `json:"action"`
}

// JobStatusChange is designed for reporting the status change via hook.
type JobStatusChange struct {
	JobID    string       `json:"job_id"`
	Status   string       `json:"status"`
	CheckIn  string       `json:"check_in,omitempty"`
	Metadata *JobStatData `json:"metadata,omitempty"`
}

// Message is designed for sub/pub messages
type Message struct {
	Event string
	Data  interface{} // generic format
}
@ -1,107 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package opm

import (
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"strings"
	"time"

	"github.com/goharbor/harbor/src/jobservice/models"
	"github.com/goharbor/harbor/src/jobservice/utils"
)

const (
	clientTimeout         = 10 * time.Second
	maxIdleConnections    = 20
	idleConnectionTimeout = 30 * time.Second
)

// DefaultHookClient is for default use.
var DefaultHookClient = NewHookClient()

// HookClient is used to post the related data to the interested parties.
type HookClient struct {
	client *http.Client
}

// NewHookClient returns the ptr of a new HookClient
func NewHookClient() *HookClient {
	client := &http.Client{
		Timeout: clientTimeout,
		Transport: &http.Transport{
			MaxIdleConns:    maxIdleConnections,
			IdleConnTimeout: idleConnectionTimeout,
		},
	}

	return &HookClient{
		client: client,
	}
}

// ReportStatus reports the status change info to the subscribed party.
// The status includes 'checkin' info with format 'check_in:<message>'
func (hc *HookClient) ReportStatus(hookURL string, status models.JobStatusChange) error {
	if utils.IsEmptyStr(hookURL) {
		return errors.New("empty hook url") // do nothing
	}

	// Parse and validate URL
	url, err := url.Parse(hookURL)
	if err != nil {
		return err
	}

	// Marshal data
	data, err := json.Marshal(&status)
	if err != nil {
		return err
	}

	// New post request
	req, err := http.NewRequest(http.MethodPost, url.String(), strings.NewReader(string(data)))
	if err != nil {
		return err
	}

	res, err := hc.client.Do(req)
	if err != nil {
		return err
	}

	defer res.Body.Close() // close connection for reuse

	// Should be 200
	if res.StatusCode != http.StatusOK {
		if res.ContentLength > 0 {
			// read error content and return
			dt, err := ioutil.ReadAll(res.Body)
			if err != nil {
				return err
			}
			return errors.New(string(dt))
		}

		return fmt.Errorf("failed to report status change via hook, expect '200' but got '%d'", res.StatusCode)
	}

	return nil
}
@ -1,54 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package opm

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/goharbor/harbor/src/jobservice/models"
)

func TestHookClient(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	}))
	defer ts.Close()

	err := DefaultHookClient.ReportStatus(ts.URL, models.JobStatusChange{
		JobID:  "fake_job_ID",
		Status: "running",
	})
	if err != nil {
		t.Fatal(err)
	}
}

func TestReportStatusFailed(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusInternalServerError)
		w.Write([]byte("failed"))
	}))
	defer ts.Close()

	err := DefaultHookClient.ReportStatus(ts.URL, models.JobStatusChange{
		JobID:  "fake_job_ID",
		Status: "running",
	})
	if err == nil {
		t.Fatal("expect error but got nil")
	}
}
@ -1,69 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package opm

import (
	"sync"

	"github.com/goharbor/harbor/src/jobservice/utils"
)

// HookStore is used to cache the hooks in memory.
// Use job ID as key to index
type HookStore struct {
	lock *sync.RWMutex
	data map[string]string
}

// NewHookStore creates a ptr of a new HookStore.
func NewHookStore() *HookStore {
	return &HookStore{
		lock: new(sync.RWMutex),
		data: make(map[string]string),
	}
}

// Add new record
func (hs *HookStore) Add(jobID string, hookURL string) {
	if utils.IsEmptyStr(jobID) {
		return // do nothing
	}

	hs.lock.Lock()
	defer hs.lock.Unlock()

	hs.data[jobID] = hookURL
}

// Get one hook url by job ID
func (hs *HookStore) Get(jobID string) (string, bool) {
	hs.lock.RLock()
	defer hs.lock.RUnlock()

	hookURL, ok := hs.data[jobID]

	return hookURL, ok
}

// Remove the specified one
func (hs *HookStore) Remove(jobID string) (string, bool) {
	hs.lock.Lock()
	defer hs.lock.Unlock()

	hookURL, ok := hs.data[jobID]
	delete(hs.data, jobID)

	return hookURL, ok
}
@ -1,137 +0,0 @@

// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package opm

import "github.com/goharbor/harbor/src/jobservice/models"

// Range defines the scope of an execution list query.
type Range int

// JobStatsManager defines the methods to handle stats of jobs.
type JobStatsManager interface {
	// Start to serve
	Start()

	// Shutdown the manager
	Shutdown()

	// Save the job stats
	// Async method with retries to improve performance
	//
	// jobStats models.JobStats : the job stats to be saved
	Save(jobStats models.JobStats)

	// Get the job stats from the backend store
	// Sync method as we need the data
	//
	// Returns:
	//  models.JobStats : job stats data
	//  error           : non-nil error if any problem occurred
	Retrieve(jobID string) (models.JobStats, error)

	// Update the properties of the job stats
	//
	// jobID string                  : ID of the job being updated
	// fieldAndValues ...interface{} : one or more property/value pairs being updated
	//
	// Returns:
	//  error if the update failed
	Update(jobID string, fieldAndValues ...interface{}) error

	// SetJobStatus will mark the status of the job to the specified one
	// Async method with retries
	SetJobStatus(jobID string, status string)

	// Send a command for the specified job
	//
	// jobID string   : ID of the job
	// command string : the command applied to the job, like stop/cancel
	// isCached bool  : indicates whether to only cache the op command
	//
	// Returns:
	//  error if it was not successfully sent
	SendCommand(jobID string, command string, isCached bool) error

	// CtlCommand checks if a control command was fired for the specified job.
	//
	// jobID string : ID of the job
	//
	// Returns:
	//  the command if it was fired
	//  error if it was not fired yet or some other problem occurred
	CtlCommand(jobID string) (string, error)

	// CheckIn a message for the specified job, like detailed progress info.
	//
	// jobID string   : ID of the job
	// message string : the message being checked in
	//
	CheckIn(jobID string, message string)

	// DieAt marks the failed job with the time it was put into the dead queue.
	//
	// jobID string : ID of the job
	// dieAt int64  : the timestamp when the job was put into the dead queue
	//
	DieAt(jobID string, dieAt int64)

	// RegisterHook is used to save the hook url or cache the url in memory.
	//
	// jobID string   : ID of the job
	// hookURL string : the hook url being registered
	// isCached bool  : indicates whether to only cache the hook url
	//
	// Returns:
	//  error if any problem occurred
	RegisterHook(jobID string, hookURL string, isCached bool) error

	// GetHook returns the web hook url for the specified job if it is registered
	//
	// jobID string : ID of the job
	//
	// Returns:
	//  the web hook url if existing
	//  non-nil error if any problem occurred
	GetHook(jobID string) (string, error)

	// Mark the periodic job stats expired
	//
	// jobID string : ID of the job
	//
	// Returns:
	//  error if any problem occurred
	ExpirePeriodicJobStats(jobID string) error

	// Persist the links between the upstream job and its executions.
	//
	// upstreamJobID string : ID of the upstream job
	// executions ...string : IDs of the execution jobs
	//
	// Returns:
	//  error if any problem occurred
	AttachExecution(upstreamJobID string, executions ...string) error

	// Get all the executions (IDs) for the specified upstream job.
	//
	// upstreamJobID string : ID of the upstream job
	// ranges ...Range      : define the start and end of the list, e.g.:
	//   0, 10 means [0:10]
	//   10 means [10:]
	//   empty means [0:-1]==all
	// Returns:
	//  the ID list of the executions if no error occurred,
	//  or a non-nil error otherwise
	GetExecutions(upstreamJobID string, ranges ...Range) ([]string, error)
}
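
// Consumption sketch (illustrative; the namespace string and pool are
// placeholders for a configured redis pool):
//
//	mgr := NewRedisJobStatsManager(context.Background(), "harbor_job_service", pool)
//	mgr.Start()
//	defer mgr.Shutdown()
//
//	stats, err := mgr.Retrieve("some_job_id")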
@ -1,178 +0,0 @@

// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package opm

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/goharbor/harbor/src/jobservice/logger"
	"github.com/goharbor/harbor/src/jobservice/models"
	"github.com/goharbor/harbor/src/jobservice/utils"
	"github.com/gomodule/redigo/redis"
)

const (
	commandValidTime       = 5 * time.Minute
	commandSweepTickerTime = 1 * time.Hour
	// EventFireCommand for firing command event
	EventFireCommand = "fire_command"
)

type oPCommand struct {
	command  string
	fireTime int64
}

// oPCommands maintains the command list
type oPCommands struct {
	lock      *sync.RWMutex
	commands  map[string]*oPCommand
	context   context.Context
	redisPool *redis.Pool
	namespace string
	stopChan  chan struct{}
	doneChan  chan struct{}
}

// newOPCommands is the constructor of oPCommands
func newOPCommands(ctx context.Context, ns string, redisPool *redis.Pool) *oPCommands {
	return &oPCommands{
		lock:      new(sync.RWMutex),
		commands:  make(map[string]*oPCommand),
		context:   ctx,
		redisPool: redisPool,
		namespace: ns,
		stopChan:  make(chan struct{}, 1),
		doneChan:  make(chan struct{}, 1),
	}
}

// Start the command sweeper
func (opc *oPCommands) Start() {
	go opc.loop()
	logger.Info("OP commands sweeper is started")
}

// Stop the command sweeper
func (opc *oPCommands) Stop() {
	opc.stopChan <- struct{}{}
	<-opc.doneChan
}

// Fire publishes the command so that all interested parties are notified
func (opc *oPCommands) Fire(jobID string, command string) error {
	if utils.IsEmptyStr(jobID) {
		return errors.New("empty job ID")
	}

	if command != CtlCommandStop && command != CtlCommandCancel {
		return fmt.Errorf("unsupported command %s", command)
	}

	notification := &models.Message{
		Event: EventFireCommand,
		Data:  []string{jobID, command},
	}

	rawJSON, err := json.Marshal(notification)
	if err != nil {
		return err
	}

	conn := opc.redisPool.Get()
	defer conn.Close()

	_, err = conn.Do("PUBLISH", utils.KeyPeriodicNotification(opc.namespace), rawJSON)

	return err
}

// Push the command into the list
func (opc *oPCommands) Push(jobID string, command string) error {
	if utils.IsEmptyStr(jobID) {
		return errors.New("empty job ID")
	}

	if command != CtlCommandStop && command != CtlCommandCancel {
		return fmt.Errorf("unsupported command %s", command)
	}

	opc.lock.Lock()
	defer opc.lock.Unlock()

	opc.commands[jobID] = &oPCommand{
		command:  command,
		fireTime: time.Now().Unix(),
	}

	return nil
}

// Pop out the command if existing
func (opc *oPCommands) Pop(jobID string) (string, bool) {
	if utils.IsEmptyStr(jobID) {
		return "", false
	}

	// A write lock is required here because the map is mutated below.
	opc.lock.Lock()
	defer opc.lock.Unlock()

	c, ok := opc.commands[jobID]
	if ok {
		if time.Unix(c.fireTime, 0).Add(commandValidTime).After(time.Now()) {
			delete(opc.commands, jobID)
			return c.command, true
		}
	}

	return "", false
}

func (opc *oPCommands) loop() {
	defer func() {
		logger.Info("OP commands sweeper is stopped")
		opc.doneChan <- struct{}{}
	}()

	tk := time.NewTicker(commandSweepTickerTime)
	defer tk.Stop()

	for {
		select {
		case <-tk.C:
			opc.sweepCommands()
		case <-opc.context.Done():
			return
		case <-opc.stopChan:
			return
		}
	}
}

func (opc *oPCommands) sweepCommands() {
	opc.lock.Lock()
	defer opc.lock.Unlock()

	for k, v := range opc.commands {
		// Only drop the commands whose valid time has expired.
		if !time.Unix(v.fireTime, 0).Add(commandValidTime).After(time.Now()) {
			delete(opc.commands, k)
		}
	}
}
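
// Command lifecycle sketch (illustrative; assumes a live redis pool named pool):
//
//	opc := newOPCommands(context.Background(), "ns", pool)
//	opc.Start()
//	_ = opc.Fire("job-1", CtlCommandStop) // notify other nodes via PUBLISH
//	_ = opc.Push("job-1", CtlCommandStop) // keep a local copy, valid for 5 minutes
//	if cmd, ok := opc.Pop("job-1"); ok {
//		fmt.Println(cmd) // "stop"
//	}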
@ -1,826 +0,0 @@

// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package opm

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"math"
	"math/rand"
	"strconv"
	"sync/atomic"
	"time"

	"github.com/goharbor/harbor/src/jobservice/errs"
	"github.com/goharbor/harbor/src/jobservice/logger"

	"github.com/goharbor/harbor/src/jobservice/job"
	"github.com/goharbor/harbor/src/jobservice/models"
	"github.com/goharbor/harbor/src/jobservice/utils"
	"github.com/gomodule/redigo/redis"
)

const (
	processBufferSize      = 1024
	opSaveStats            = "save_job_stats"
	opUpdateStatus         = "update_job_status"
	opCheckIn              = "check_in"
	opDieAt                = "mark_die_at"
	opReportStatus         = "report_status"
	opPersistExecutions    = "persist_executions"
	opUpdateStats          = "update_job_stats"
	maxFails               = 3
	jobStatsDataExpireTime = 60 * 60 * 24 * 5 // 5 days

	// CtlCommandStop : command stop
	CtlCommandStop = "stop"
	// CtlCommandCancel : command cancel
	CtlCommandCancel = "cancel"
	// CtlCommandRetry : command retry
	CtlCommandRetry = "retry"

	// EventRegisterStatusHook is the event name of registering hook
	EventRegisterStatusHook = "register_hook"
)

type queueItem struct {
	Op    string
	Fails uint
	Data  interface{}
}

func (qi *queueItem) string() string {
	data, err := json.Marshal(qi)
	if err != nil {
		return fmt.Sprintf("%v", qi)
	}

	return string(data)
}

// RedisJobStatsManager implements JobStatsManager based on redis.
type RedisJobStatsManager struct {
	namespace   string
	redisPool   *redis.Pool
	context     context.Context
	stopChan    chan struct{}
	doneChan    chan struct{}
	processChan chan *queueItem
	isRunning   *atomic.Value
	hookStore   *HookStore  // cache the hooks here to avoid requesting the backend
	opCommands  *oPCommands // maintain the OP commands
}

// NewRedisJobStatsManager is the constructor of RedisJobStatsManager
func NewRedisJobStatsManager(ctx context.Context, namespace string, redisPool *redis.Pool) JobStatsManager {
	isRunning := &atomic.Value{}
	isRunning.Store(false)

	return &RedisJobStatsManager{
		namespace:   namespace,
		context:     ctx,
		redisPool:   redisPool,
		stopChan:    make(chan struct{}, 1),
		doneChan:    make(chan struct{}, 1),
		processChan: make(chan *queueItem, processBufferSize),
		hookStore:   NewHookStore(),
		isRunning:   isRunning,
		opCommands:  newOPCommands(ctx, namespace, redisPool),
	}
}

// Start is the implementation of the same method in the JobStatsManager interface.
func (rjs *RedisJobStatsManager) Start() {
	if rjs.isRunning.Load().(bool) {
		return
	}
	go rjs.loop()
	rjs.opCommands.Start()
	rjs.isRunning.Store(true)

	logger.Info("Redis job stats manager is started")
}

// Shutdown is the implementation of the same method in the JobStatsManager interface.
func (rjs *RedisJobStatsManager) Shutdown() {
	defer func() {
		rjs.isRunning.Store(false)
	}()

	if !(rjs.isRunning.Load().(bool)) {
		return
	}

	rjs.opCommands.Stop()
	rjs.stopChan <- struct{}{}
	<-rjs.doneChan
}

// Save is the implementation of the same method in the JobStatsManager interface.
// Async method
func (rjs *RedisJobStatsManager) Save(jobStats models.JobStats) {
	item := &queueItem{
		Op:   opSaveStats,
		Data: jobStats,
	}

	rjs.processChan <- item
}

// Retrieve is the implementation of the same method in the JobStatsManager interface.
// Sync method
func (rjs *RedisJobStatsManager) Retrieve(jobID string) (models.JobStats, error) {
	if utils.IsEmptyStr(jobID) {
		return models.JobStats{}, errors.New("empty job ID")
	}

	res, err := rjs.getJobStats(jobID)
	if err != nil {
		return models.JobStats{}, err
	}

	if res.Stats.IsMultipleExecutions {
		executions, err := rjs.GetExecutions(jobID)
		if err != nil {
			return models.JobStats{}, err
		}

		res.Stats.Executions = executions
	}

	return res, nil
}

// SetJobStatus is the implementation of the same method in the JobStatsManager interface.
// Async method
func (rjs *RedisJobStatsManager) SetJobStatus(jobID string, status string) {
	if utils.IsEmptyStr(jobID) || utils.IsEmptyStr(status) {
		return
	}

	item := &queueItem{
		Op:   opUpdateStatus,
		Data: []string{jobID, status},
	}

	rjs.processChan <- item

	// Report status at the same time
	rjs.submitStatusReportingItem(jobID, status, "")
}

func (rjs *RedisJobStatsManager) loop() {
	controlChan := make(chan struct{})

	defer func() {
		rjs.isRunning.Store(false)
		// Notify other sub goroutines
		close(controlChan)
		logger.Info("Redis job stats manager is stopped")
	}()

	for {
		select {
		case item := <-rjs.processChan:
			go func(item *queueItem) {
				clearHookCache := false
				if err := rjs.process(item); err != nil {
					item.Fails++
					if item.Fails < maxFails {
						logger.Warningf("Failed to process '%s' request with error: %s\n", item.Op, err)

						// Retry after a random interval
						go func() {
							timer := time.NewTimer(time.Duration(backoff(item.Fails)) * time.Second)
							defer timer.Stop()

							select {
							case <-timer.C:
								rjs.processChan <- item
								return
							case <-controlChan:
							}
						}()
					} else {
						logger.Errorf("Failed to process '%s' request with error: %s (%d times tried)\n", item.Op, err, maxFails)
						if item.Op == opReportStatus {
							clearHookCache = true
						}
					}
				} else {
					logger.Debugf("Operation is successfully processed: %s", item.string())

					if item.Op == opReportStatus {
						clearHookCache = true
					}
				}

				if clearHookCache {
					// Clear the cache to save memory if the job status is success or stopped.
					data := item.Data.([]string)
					status := data[2]
					if status == job.JobStatusSuccess || status == job.JobStatusStopped {
						rjs.hookStore.Remove(data[0])
					}
				}
			}(item)
		case <-rjs.stopChan:
			rjs.doneChan <- struct{}{}
			return
		case <-rjs.context.Done():
			return
		}
	}
}
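
// Processing-flow sketch (illustrative): every async write API is funneled
// through the buffered processChan and dispatched by process() inside loop():
//
//	mgr.Save(stats)                 // enqueues {Op: opSaveStats}
//	mgr.SetJobStatus(id, "running") // enqueues {Op: opUpdateStatus} plus a report item
//	// loop() -> process(item) -> saveJobStats()/updateJobStatus()/... against redis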

// SendCommand for the specified job
func (rjs *RedisJobStatsManager) SendCommand(jobID string, command string, isCached bool) error {
	if utils.IsEmptyStr(jobID) {
		return errors.New("empty job ID")
	}

	if command != CtlCommandStop && command != CtlCommandCancel {
		return errors.New("unknown command")
	}

	if !isCached {
		// Let other interested parties know about it
		if err := rjs.opCommands.Fire(jobID, command); err != nil {
			return err
		}
	}

	// Directly add to the op commands maintaining list
	return rjs.opCommands.Push(jobID, command)
}

// CheckIn message
func (rjs *RedisJobStatsManager) CheckIn(jobID string, message string) {
	if utils.IsEmptyStr(jobID) || utils.IsEmptyStr(message) {
		return
	}

	item := &queueItem{
		Op:   opCheckIn,
		Data: []string{jobID, message},
	}

	rjs.processChan <- item

	// Report the check-in message at the same time
	rjs.submitStatusReportingItem(jobID, job.JobStatusRunning, message)
}

// CtlCommand checks if a control command was fired for the specified job.
func (rjs *RedisJobStatsManager) CtlCommand(jobID string) (string, error) {
	if utils.IsEmptyStr(jobID) {
		return "", errors.New("empty job ID")
	}

	c, ok := rjs.opCommands.Pop(jobID)
	if !ok {
		return "", fmt.Errorf("no OP command fired to job %s", jobID)
	}

	return c, nil
}

// DieAt marks the failed job with the time it was put into the dead queue.
func (rjs *RedisJobStatsManager) DieAt(jobID string, dieAt int64) {
	if utils.IsEmptyStr(jobID) || dieAt == 0 {
		return
	}

	item := &queueItem{
		Op:   opDieAt,
		Data: []interface{}{jobID, dieAt},
	}

	rjs.processChan <- item
}

// RegisterHook is used to save the hook url or cache the url in memory.
func (rjs *RedisJobStatsManager) RegisterHook(jobID string, hookURL string, isCached bool) error {
	if utils.IsEmptyStr(jobID) {
		return errors.New("empty job ID")
	}

	if !utils.IsValidURL(hookURL) {
		return errors.New("invalid hook url")
	}

	if !isCached {
		return rjs.saveHook(jobID, hookURL)
	}

	rjs.hookStore.Add(jobID, hookURL)

	return nil
}

// GetHook returns the status web hook url for the specified job if existing
func (rjs *RedisJobStatsManager) GetHook(jobID string) (string, error) {
	if utils.IsEmptyStr(jobID) {
		return "", errors.New("empty job ID")
	}

	// First retrieve from the cache
	if hookURL, ok := rjs.hookStore.Get(jobID); ok {
		return hookURL, nil
	}

	// Not hit in the cache! Get it from the backend.
	hookURL, err := rjs.getHook(jobID)
	if err != nil {
		return "", err
	}

	// Cache and return
	rjs.hookStore.Add(jobID, hookURL)

	return hookURL, nil
}

// ExpirePeriodicJobStats marks the periodic job stats expired
func (rjs *RedisJobStatsManager) ExpirePeriodicJobStats(jobID string) error {
	conn := rjs.redisPool.Get()
	defer conn.Close()

	// The periodic job (policy) is stopped/unscheduled, so
	// the stats of the periodic job can now be expired
	key := utils.KeyJobStats(rjs.namespace, jobID)
	_, err := conn.Do("EXPIRE", key, jobStatsDataExpireTime)

	return err
}

// AttachExecution persists the links between the upstream job and the related executions (jobs).
func (rjs *RedisJobStatsManager) AttachExecution(upstreamJobID string, executions ...string) error {
	if len(upstreamJobID) == 0 {
		return errors.New("empty upstream job ID is not allowed")
	}

	if len(executions) == 0 {
		return errors.New("no executions existing to persist")
	}

	// Send to the process channel
	item := &queueItem{
		Op:   opPersistExecutions,
		Data: []interface{}{upstreamJobID, executions},
	}

	rjs.processChan <- item

	return nil
}

// GetExecutions returns the existing executions (IDs) for the specified job.
func (rjs *RedisJobStatsManager) GetExecutions(upstreamJobID string, ranges ...Range) ([]string, error) {
	if len(upstreamJobID) == 0 {
		return nil, errors.New("no upstream ID specified")
	}

	conn := rjs.redisPool.Get()
	defer conn.Close()

	var start, end interface{} = "-inf", "+inf"
	if len(ranges) >= 1 {
		start = int(ranges[0])
	}
	if len(ranges) > 1 {
		end = int(ranges[1])
	}

	key := utils.KeyUpstreamJobAndExecutions(rjs.namespace, upstreamJobID)
	ids, err := redis.Strings(conn.Do("ZRANGEBYSCORE", key, start, end))
	if err != nil {
		if err == redis.ErrNil {
			return []string{}, nil
		}

		return nil, err
	}

	return ids, nil
}

// Update the properties of the job stats
func (rjs *RedisJobStatsManager) Update(jobID string, fieldAndValues ...interface{}) error {
	if len(jobID) == 0 {
		return errors.New("no updating job")
	}

	if len(fieldAndValues) == 0 || len(fieldAndValues)%2 != 0 {
		return errors.New("fields and values should be paired")
	}

	data := []interface{}{}
	data = append(data, jobID)
	data = append(data, fieldAndValues...)

	item := &queueItem{
		Op:   opUpdateStats,
		Data: data,
	}

	rjs.processChan <- item

	return nil
}

func (rjs *RedisJobStatsManager) submitStatusReportingItem(jobID string, status, checkIn string) {
	// Let it run in a separate goroutine to avoid extra waiting time
	go func() {
		var (
			hookURL string
			ok      bool
			err     error
		)

		hookURL, ok = rjs.hookStore.Get(jobID)
		if !ok {
			// Retrieve from the backend
			hookURL, err = rjs.getHook(jobID)
			if err != nil || !utils.IsValidURL(hookURL) {
				// logged and exit
				logger.Warningf("no status hook found for job %s, abandon status reporting\n", jobID)
				return
			}
		}

		item := &queueItem{
			Op:   opReportStatus,
			Data: []string{jobID, hookURL, status, checkIn},
		}

		rjs.processChan <- item
	}()
}

func (rjs *RedisJobStatsManager) reportStatus(jobID string, hookURL, status, checkIn string) error {
	reportingStatus := models.JobStatusChange{
		JobID:   jobID,
		Status:  status,
		CheckIn: checkIn,
	}
	// Return the whole metadata of the job.
	// To support forward compatibility, keep the original fields `Status` and `CheckIn`.
	// TODO: If querying job stats causes performance issues, a two-level cache should be enabled.
	jobStats, err := rjs.getJobStats(jobID)
	if err != nil {
		// Just logged
		logger.Errorf("Retrieving stats of job %s for hook reporting failed with error: %s", jobID, err)
	} else {
		// Override the status/check-in message
		// Just a double confirmation
		jobStats.Stats.CheckIn = checkIn
		jobStats.Stats.Status = status
		reportingStatus.Metadata = jobStats.Stats
	}

	return DefaultHookClient.ReportStatus(hookURL, reportingStatus)
}

func (rjs *RedisJobStatsManager) updateJobStats(jobID string, fieldAndValues ...interface{}) error {
	conn := rjs.redisPool.Get()
	defer conn.Close()

	key := utils.KeyJobStats(rjs.namespace, jobID)
	args := make([]interface{}, 0, len(fieldAndValues)+3)

	args = append(args, key)
	args = append(args, fieldAndValues...)
	args = append(args, "update_time", time.Now().Unix())

	_, err := conn.Do("HMSET", args...)

	return err
}

func (rjs *RedisJobStatsManager) updateJobStatus(jobID string, status string) error {
	args := make([]interface{}, 0, 4)
	args = append(args, "status", status)
	if status == job.JobStatusSuccess {
		// make sure 'die_at' is reset in case it's a retried job
		args = append(args, "die_at", 0)
	}

	return rjs.updateJobStats(jobID, args...)
}

func (rjs *RedisJobStatsManager) checkIn(jobID string, message string) error {
	now := time.Now().Unix()
	args := make([]interface{}, 0, 4)
	args = append(args, "check_in", message, "check_in_at", now)

	return rjs.updateJobStats(jobID, args...)
}

func (rjs *RedisJobStatsManager) dieAt(jobID string, baseTime int64) error {
	conn := rjs.redisPool.Get()
	defer conn.Close()

	// Query the dead job in the time scope of [baseTime, baseTime+5]
	key := utils.RedisKeyDead(rjs.namespace)
	jobWithScores, err := utils.GetZsetByScore(rjs.redisPool, key, []int64{baseTime, baseTime + 5})
	if err != nil {
		return err
	}

	for _, jws := range jobWithScores {
		if j, err := utils.DeSerializeJob(jws.JobBytes); err == nil {
			if j.ID == jobID {
				// Found
				args := make([]interface{}, 0, 2)
				args = append(args, "die_at", jws.Score)
				return rjs.updateJobStats(jobID, args...)
			}
		}
	}

	return fmt.Errorf("seems %s is not a dead job", jobID)
}

func (rjs *RedisJobStatsManager) getJobStats(jobID string) (models.JobStats, error) {
	conn := rjs.redisPool.Get()
	defer conn.Close()

	key := utils.KeyJobStats(rjs.namespace, jobID)
	vals, err := redis.Strings(conn.Do("HGETALL", key))
	if err != nil {
		return models.JobStats{}, err
	}

	if len(vals) == 0 {
		return models.JobStats{}, errs.NoObjectFoundError(fmt.Sprintf("job '%s'", jobID))
	}

	res := models.JobStats{
		Stats: &models.JobStatData{},
	}
	for i, l := 0, len(vals); i < l; i = i + 2 {
		prop := vals[i]
		value := vals[i+1]
		switch prop {
		case "id":
			res.Stats.JobID = value
		case "name":
			res.Stats.JobName = value
		case "kind":
			res.Stats.JobKind = value
		case "unique":
			v, err := strconv.ParseBool(value)
			if err != nil {
				v = false
			}
			res.Stats.IsUnique = v
		case "status":
			res.Stats.Status = value
		case "ref_link":
			res.Stats.RefLink = value
		case "enqueue_time":
			v, _ := strconv.ParseInt(value, 10, 64)
			res.Stats.EnqueueTime = v
		case "update_time":
			v, _ := strconv.ParseInt(value, 10, 64)
			res.Stats.UpdateTime = v
		case "run_at":
			v, _ := strconv.ParseInt(value, 10, 64)
			res.Stats.RunAt = v
		case "check_in_at":
			v, _ := strconv.ParseInt(value, 10, 64)
			res.Stats.CheckInAt = v
		case "check_in":
			res.Stats.CheckIn = value
		case "cron_spec":
			res.Stats.CronSpec = value
		case "die_at":
			v, _ := strconv.ParseInt(value, 10, 64)
			res.Stats.DieAt = v
		case "upstream_job_id":
			res.Stats.UpstreamJobID = value
		case "multiple_executions":
			v, err := strconv.ParseBool(value)
			if err != nil {
				v = false
			}
			res.Stats.IsMultipleExecutions = v
		}
	}

	return res, nil
}

func (rjs *RedisJobStatsManager) saveJobStats(jobStats models.JobStats) error {
	if jobStats.Stats == nil {
		return errors.New("malformed job stats object")
	}

	conn := rjs.redisPool.Get()
	defer conn.Close()

	key := utils.KeyJobStats(rjs.namespace, jobStats.Stats.JobID)
	args := make([]interface{}, 0)
	args = append(args, key)
	args = append(args,
		"id", jobStats.Stats.JobID,
		"name", jobStats.Stats.JobName,
		"kind", jobStats.Stats.JobKind,
		"unique", jobStats.Stats.IsUnique,
		"status", jobStats.Stats.Status,
		"ref_link", jobStats.Stats.RefLink,
		"enqueue_time", jobStats.Stats.EnqueueTime,
		"update_time", jobStats.Stats.UpdateTime,
		"run_at", jobStats.Stats.RunAt,
		"cron_spec", jobStats.Stats.CronSpec,
		"multiple_executions", jobStats.Stats.IsMultipleExecutions,
	)
	if jobStats.Stats.CheckInAt > 0 && !utils.IsEmptyStr(jobStats.Stats.CheckIn) {
		args = append(args,
			"check_in", jobStats.Stats.CheckIn,
			"check_in_at", jobStats.Stats.CheckInAt,
		)
	}
	if jobStats.Stats.DieAt > 0 {
		args = append(args, "die_at", jobStats.Stats.DieAt)
	}

	if len(jobStats.Stats.UpstreamJobID) > 0 {
		args = append(args, "upstream_job_id", jobStats.Stats.UpstreamJobID)
	}

	if err := conn.Send("HMSET", args...); err != nil {
		return err
	}
	// If the job kind is periodic, no expire time should be set.
	// If the job kind is scheduled, extend the base expire time by the
	// interval until the run time (runAt - now).
	if jobStats.Stats.JobKind != job.JobKindPeriodic {
		var expireTime int64 = jobStatsDataExpireTime
		if jobStats.Stats.JobKind == job.JobKindScheduled {
			nowTime := time.Now().Unix()
			future := jobStats.Stats.RunAt - nowTime
			if future > 0 {
				expireTime += future
			}
		}
		expireTime += rand.Int63n(30) // Avoid lots of keys being expired at the same time
		if err := conn.Send("EXPIRE", key, expireTime); err != nil {
			return err
		}
	}

	return conn.Flush()
}
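
// Persistence sketch: saveJobStats pipelines HMSET (the stats hash) and, for
// non-periodic jobs, EXPIRE on the same connection, flushing both in a single
// round trip. Rough shape of the resulting redis traffic (key layout shown is
// illustrative):
//
//	HMSET  {ns}:job_stats:<job_id> id ... status ... run_at ...
//	EXPIRE {ns}:job_stats:<job_id> <5 days (+ future run window) + up to 30s jitter>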

func (rjs *RedisJobStatsManager) saveExecutions(upstreamJobID string, executions []string) error {
	key := utils.KeyUpstreamJobAndExecutions(rjs.namespace, upstreamJobID)

	conn := rjs.redisPool.Get()
	defer conn.Close()

	err := conn.Send("MULTI")
	if err != nil {
		return err
	}

	args := []interface{}{key}
	baseScore := time.Now().Unix()
	for index, execution := range executions {
		args = append(args, baseScore+int64(index), execution)
	}

	if err := conn.Send("ZADD", args...); err != nil {
		return err
	}

	// Add the expire time
	if err := conn.Send("EXPIRE", key, jobStatsDataExpireTime); err != nil {
		return err
	}

	_, err = conn.Do("EXEC")

	return err
}
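
// Scoring sketch: executions live in a sorted set whose score is the enqueue
// epoch plus the slice index, so ZRANGEBYSCORE returns them in attach order
// (values and key layout below are illustrative):
//
//	ZADD {ns}:upstream:<id> 1555000000 id1 1555000001 id2 1555000002 id3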

func (rjs *RedisJobStatsManager) process(item *queueItem) error {
	switch item.Op {
	case opSaveStats:
		jobStats := item.Data.(models.JobStats)
		return rjs.saveJobStats(jobStats)
	case opUpdateStatus:
		data := item.Data.([]string)
		return rjs.updateJobStatus(data[0], data[1])
	case opCheckIn:
		data := item.Data.([]string)
		return rjs.checkIn(data[0], data[1])
	case opDieAt:
		data := item.Data.([]interface{})
		return rjs.dieAt(data[0].(string), data[1].(int64))
	case opReportStatus:
		data := item.Data.([]string)
		return rjs.reportStatus(data[0], data[1], data[2], data[3])
	case opPersistExecutions:
		data := item.Data.([]interface{})
		return rjs.saveExecutions(data[0].(string), data[1].([]string))
	case opUpdateStats:
		data := item.Data.([]interface{})
		return rjs.updateJobStats(data[0].(string), data[1:]...)
	}

	return nil
}

// HookData keeps the hook url info
type HookData struct {
	JobID   string `json:"job_id"`
	HookURL string `json:"hook_url"`
}

func (rjs *RedisJobStatsManager) saveHook(jobID string, hookURL string) error {
	conn := rjs.redisPool.Get()
	defer conn.Close()

	key := utils.KeyJobStats(rjs.namespace, jobID)
	args := make([]interface{}, 0, 3)
	args = append(args, key, "status_hook", hookURL)
	msg := &models.Message{
		Event: EventRegisterStatusHook,
		Data: &HookData{
			JobID:   jobID,
			HookURL: hookURL,
		},
	}
	rawJSON, err := json.Marshal(msg)
	if err != nil {
		return err
	}

	// The hook is saved into the job stats.
	// The expire time is not set here; it will be set when saving the job stats.
	if err := conn.Send("MULTI"); err != nil {
		return err
	}
	if err := conn.Send("HMSET", args...); err != nil {
		return err
	}
	if err := conn.Send("PUBLISH", utils.KeyPeriodicNotification(rjs.namespace), rawJSON); err != nil {
		return err
	}

	_, err = conn.Do("EXEC")
	return err
}

func (rjs *RedisJobStatsManager) getHook(jobID string) (string, error) {
	conn := rjs.redisPool.Get()
	defer conn.Close()

	key := utils.KeyJobStats(rjs.namespace, jobID)
	hookURL, err := redis.String(conn.Do("HGET", key, "status_hook"))
	if err != nil {
		if err == redis.ErrNil {
			return "", fmt.Errorf("no registered web hook found for job '%s'", jobID)
		}
		return "", err
	}

	return hookURL, nil
}

func backoff(seed uint) int {
	if seed < 1 {
		seed = 1
	}

	return int(math.Pow(float64(seed+1), float64(seed))) + rand.Intn(5)
}
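
// Backoff sketch: the retry delay grows as (n+1)^n seconds plus a little
// jitter (rand.Intn(5)), where n is the number of failures so far:
//
//	backoff(1) = 2 + jitter  // (1+1)^1
//	backoff(2) = 9 + jitter  // (2+1)^2
//	backoff(3) = 64 + jitter // (3+1)^3, not reached: retries stop at maxFails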
@ -1,342 +0,0 @@

// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package opm

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"os"
	"strings"
	"testing"
	"time"

	"github.com/goharbor/harbor/src/jobservice/job"
	"github.com/goharbor/harbor/src/jobservice/models"
	"github.com/goharbor/harbor/src/jobservice/utils"
	"github.com/gomodule/redigo/redis"
)

const (
	dialConnectionTimeout = 30 * time.Second
	healthCheckPeriod     = time.Minute
	dialReadTimeout       = healthCheckPeriod + 10*time.Second
	dialWriteTimeout      = 10 * time.Second
	testingRedisHost      = "REDIS_HOST"
	testingNamespace      = "testing_job_service_v2"
)

var redisHost = getRedisHost()
var redisPool = &redis.Pool{
	MaxActive: 2,
	MaxIdle:   2,
	Wait:      true,
	Dial: func() (redis.Conn, error) {
		return redis.Dial(
			"tcp",
			fmt.Sprintf("%s:%d", redisHost, 6379),
			redis.DialConnectTimeout(dialConnectionTimeout),
			redis.DialReadTimeout(dialReadTimeout),
			redis.DialWriteTimeout(dialWriteTimeout),
		)
	},
}

func TestSetJobStatus(t *testing.T) {
	mgr := createStatsManager(redisPool)
	mgr.Start()
	defer mgr.Shutdown()
	<-time.After(200 * time.Millisecond)
	// make sure the data exists
	testingStats := createFakeStats()
	mgr.Save(testingStats)
	<-time.After(200 * time.Millisecond)

	mgr.SetJobStatus("fake_job_ID", "running")
	<-time.After(100 * time.Millisecond)
	stats, err := mgr.Retrieve("fake_job_ID")
	if err != nil {
		t.Fatal(err)
	}

	if stats.Stats.Status != "running" {
		t.Fatalf("expect job status 'running' but got '%s'\n", stats.Stats.Status)
	}

	key := utils.KeyJobStats(testingNamespace, "fake_job_ID")
	if err := clear(key, redisPool.Get()); err != nil {
		t.Fatal(err)
	}
}

func TestCommand(t *testing.T) {
	mgr := createStatsManager(redisPool)
	mgr.Start()
	defer mgr.Shutdown()
	<-time.After(200 * time.Millisecond)

	if err := mgr.SendCommand("fake_job_ID", CtlCommandStop, true); err != nil {
		t.Fatal(err)
	}

	if cmd, err := mgr.CtlCommand("fake_job_ID"); err != nil {
		t.Fatal(err)
	} else {
		if cmd != CtlCommandStop {
			t.Fatalf("expect '%s' but got '%s'", CtlCommandStop, cmd)
		}
	}
}

func TestDieAt(t *testing.T) {
	mgr := createStatsManager(redisPool)
	mgr.Start()
	defer mgr.Shutdown()
	<-time.After(200 * time.Millisecond)

	testingStats := createFakeStats()
	mgr.Save(testingStats)

	dieAt := time.Now().Unix()
	if err := createDeadJob(redisPool.Get(), dieAt); err != nil {
		t.Fatal(err)
	}
	<-time.After(200 * time.Millisecond)
	mgr.DieAt("fake_job_ID", dieAt)
	<-time.After(300 * time.Millisecond)

	stats, err := mgr.Retrieve("fake_job_ID")
	if err != nil {
		t.Fatal(err)
	}

	if stats.Stats.DieAt != dieAt {
		t.Fatalf("expect die at '%d' but got '%d'\n", dieAt, stats.Stats.DieAt)
	}

	key := utils.KeyJobStats(testingNamespace, "fake_job_ID")
	if err := clear(key, redisPool.Get()); err != nil {
		t.Fatal(err)
	}
	key2 := utils.RedisKeyDead(testingNamespace)
	if err := clear(key2, redisPool.Get()); err != nil {
		t.Fatal(err)
	}
}

func TestRegisterHook(t *testing.T) {
	mgr := createStatsManager(redisPool)
	mgr.Start()
	defer mgr.Shutdown()
	<-time.After(200 * time.Millisecond)

	if err := mgr.RegisterHook("fake_job_ID", "http://localhost:9999", false); err != nil {
		t.Fatal(err)
	}

	key := utils.KeyJobStats(testingNamespace, "fake_job_ID")
	if err := clear(key, redisPool.Get()); err != nil {
		t.Fatal(err)
	}
}

func TestExpireJobStats(t *testing.T) {
	mgr := createStatsManager(redisPool)
	mgr.Start()
	defer mgr.Shutdown()
	<-time.After(200 * time.Millisecond)

	// make sure the data exists
	testingStats := createFakeStats()
	mgr.Save(testingStats)
	<-time.After(200 * time.Millisecond)

	if err := mgr.ExpirePeriodicJobStats("fake_job_ID"); err != nil {
		t.Fatal(err)
	}

	key := utils.KeyJobStats(testingNamespace, "fake_job_ID")
	if err := clear(key, redisPool.Get()); err != nil {
		t.Fatal(err)
	}
}

func TestCheckIn(t *testing.T) {
	mgr := createStatsManager(redisPool)
	mgr.Start()
	defer mgr.Shutdown()
	<-time.After(200 * time.Millisecond)

	// make sure the data exists
	testingStats := createFakeStats()
	mgr.Save(testingStats)
	<-time.After(200 * time.Millisecond)

	// Start an http server
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		data, err := ioutil.ReadAll(r.Body)
		if err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			return
		}
		defer r.Body.Close()

		statusReport := &models.JobStatusChange{}
		if err := json.Unmarshal(data, statusReport); err != nil {
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		if statusReport.Metadata == nil || statusReport.Metadata.JobID != "fake_job_ID" {
			w.WriteHeader(http.StatusInternalServerError)
			return
		}

		fmt.Fprintln(w, "ok")
	}))
	defer ts.Close()

	if err := mgr.RegisterHook("fake_job_ID", ts.URL, false); err != nil {
		t.Fatal(err)
	}

	mgr.CheckIn("fake_job_ID", "checkin")
	<-time.After(200 * time.Millisecond)

	stats, err := mgr.Retrieve("fake_job_ID")
	if err != nil {
		t.Fatal(err)
	}

	if stats.Stats.CheckIn != "checkin" {
		t.Fatalf("expect check in info 'checkin' but got '%s'\n", stats.Stats.CheckIn)
	}

	key := utils.KeyJobStats(testingNamespace, "fake_job_ID")
	if err := clear(key, redisPool.Get()); err != nil {
		t.Fatal(err)
	}
}

func TestExecutionRelated(t *testing.T) {
	mgr := createStatsManager(redisPool)
	mgr.Start()
	defer mgr.Shutdown()
	<-time.After(200 * time.Millisecond)

	if err := mgr.AttachExecution("upstream_id", "id1", "id2", "id3"); err != nil {
		t.Fatal(err)
	}

	// Wait for the data to be stable
	<-time.After(200 * time.Millisecond)
	ids, err := mgr.GetExecutions("upstream_id")
	if err != nil {
		t.Fatal(err)
	}

	if strings.Join(ids, "/") != "id1/id2/id3" {
		t.Fatalf("expect 'id1/id2/id3' but got %s", strings.Join(ids, " / "))
	}
}

func TestUpdateJobStats(t *testing.T) {
	mgr := createStatsManager(redisPool)
	mgr.Start()
	defer mgr.Shutdown()
	<-time.After(200 * time.Millisecond)

	// make sure the data exists
	testingStats := createFakeStats()
	mgr.Save(testingStats)
	<-time.After(200 * time.Millisecond)

	if err := mgr.Update("fake_job_ID", "status", "Error"); err != nil {
		t.Fatal(err)
	}
	<-time.After(200 * time.Millisecond)

	updatedStats, err := mgr.Retrieve("fake_job_ID")
	if err != nil {
		t.Fatal(err)
	}

	if updatedStats.Stats.Status != "Error" {
		t.Fatalf("expect status to be '%s' but got '%s'", "Error", updatedStats.Stats.Status)
	}
}

func getRedisHost() string {
	redisHost := os.Getenv(testingRedisHost)
	if redisHost == "" {
		redisHost = "localhost" // for local test
	}

	return redisHost
}

func createStatsManager(redisPool *redis.Pool) JobStatsManager {
	ctx := context.Background()
	return NewRedisJobStatsManager(ctx, testingNamespace, redisPool)
}

func clear(key string, conn redis.Conn) error {
	if conn != nil {
		defer conn.Close()
		_, err := conn.Do("DEL", key)
		return err
	}

	return errors.New("failed to clear")
}

func createFakeStats() models.JobStats {
	testingStats := models.JobStats{
		Stats: &models.JobStatData{
			JobID:       "fake_job_ID",
			JobKind:     job.JobKindPeriodic,
			JobName:     "fake_job",
			Status:      "Pending",
			IsUnique:    false,
			RefLink:     "/api/v1/jobs/fake_job_ID",
			CronSpec:    "5 * * * * *",
			EnqueueTime: time.Now().Unix(),
			UpdateTime:  time.Now().Unix(),
		},
	}

	return testingStats
}

func createDeadJob(conn redis.Conn, dieAt int64) error {
	dead := make(map[string]interface{})
	dead["name"] = "fake_job"
	dead["id"] = "fake_job_ID"
	dead["args"] = make(map[string]interface{})
	dead["fails"] = 3
	dead["err"] = "testing error"
	dead["failed_at"] = dieAt

	rawJSON, err := json.Marshal(&dead)
	if err != nil {
		return err
	}

	defer conn.Close()
	key := utils.RedisKeyDead(testingNamespace)
	_, err = conn.Do("ZADD", key, dieAt, rawJSON)
	return err
}
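
// Note: these tests exercise a real redis instance. A server is assumed to be
// reachable at ${REDIS_HOST}:6379, falling back to localhost when the
// REDIS_HOST environment variable is unset (invocation below is illustrative):
//
//	REDIS_HOST=10.0.0.5 go test ./src/jobservice/opm/...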
293
src/jobservice/period/basic_scheduler.go
Normal file
@ -0,0 +1,293 @@

// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package period

import (
	"context"
	"encoding/json"
	"time"

	"github.com/gocraft/work"
	"github.com/goharbor/harbor/src/jobservice/common/rds"
	"github.com/goharbor/harbor/src/jobservice/common/utils"
	"github.com/goharbor/harbor/src/jobservice/job"
	"github.com/goharbor/harbor/src/jobservice/lcm"
	"github.com/goharbor/harbor/src/jobservice/logger"
	"github.com/gomodule/redigo/redis"
	"github.com/pkg/errors"
)

// basicScheduler manages the periodic scheduling policies.
type basicScheduler struct {
	context   context.Context
	pool      *redis.Pool
	namespace string
	enqueuer  *enqueuer
	client    *work.Client
	ctl       lcm.Controller
}

// NewScheduler is the constructor of basicScheduler
func NewScheduler(ctx context.Context, namespace string, pool *redis.Pool, ctl lcm.Controller) Scheduler {
	return &basicScheduler{
		context:   ctx,
		pool:      pool,
		namespace: namespace,
		enqueuer:  newEnqueuer(ctx, namespace, pool, ctl),
		client:    work.NewClient(namespace, pool),
		ctl:       ctl,
	}
}

// Start the periodic scheduling process
// Blocking call here
func (bs *basicScheduler) Start() error {
	defer func() {
		logger.Info("Basic scheduler is stopped")
	}()

	// Best-effort cleanup
	go bs.clearDirtyJobs()

	logger.Info("Basic scheduler is started")

	// Start the enqueuer
	return bs.enqueuer.start()
}

// Stop the periodic scheduling process
func (bs *basicScheduler) Stop() error {
	// stop everything
	bs.enqueuer.stopChan <- true

	return nil
}

// Schedule is the implementation of the same method in period.Interface
func (bs *basicScheduler) Schedule(p *Policy) (int64, error) {
	if p == nil {
		return -1, errors.New("bad policy object: nil")
	}

	if err := p.Validate(); err != nil {
		return -1, err
	}

	conn := bs.pool.Get()
	defer func() {
		_ = conn.Close()
	}()

	// Do the 1st round of enqueuing
	bs.enqueuer.scheduleNextJobs(p, conn)

	// Serialize data
	rawJSON, err := p.Serialize()
	if err != nil {
		return -1, err
	}

	// Prepare the publish message
	m := &message{
		Event: changeEventSchedule,
		Data:  p,
	}

	msgJSON, err := json.Marshal(m)
	if err != nil {
		return -1, err
	}

	pid := time.Now().Unix()

	// Save to the redis db and publish the notification via a redis transaction
	err = conn.Send("MULTI")
	err = conn.Send("ZADD", rds.KeyPeriodicPolicy(bs.namespace), pid, rawJSON)
	err = conn.Send("PUBLISH", rds.KeyPeriodicNotification(bs.namespace), msgJSON)
	if err != nil {
		return -1, err
	}
	if _, err := conn.Do("EXEC"); err != nil {
		return -1, err
	}

	return pid, nil
}
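
// Scheduling sketch: the policy is stored and broadcast atomically, and the
// returned pid doubles as the ZSET score, which is what lets UnSchedule later
// remove exactly this policy with ZREMRANGEBYSCORE (key layout is illustrative):
//
//	MULTI
//	ZADD    {ns}:period:policies     <pid> <policy JSON>
//	PUBLISH {ns}:period:notification <schedule change message>
//	EXEC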

// UnSchedule is the implementation of the same method in period.Interface
func (bs *basicScheduler) UnSchedule(policyID string) error {
	if utils.IsEmptyStr(policyID) {
		return errors.New("bad periodic job ID: empty")
	}

	tracker, err := bs.ctl.Track(policyID)
	if err != nil {
		return err
	}

	// If an error occurs when getting the numeric ID of the periodic job,
	// it may be because the specified job is not a valid periodic job.
	numericID, err := tracker.NumericID()
	if err != nil {
		return err
	}

	conn := bs.pool.Get()
	defer func() {
		_ = conn.Close()
	}()

	// Get the policy object being unscheduled
	bytes, err := redis.Values(conn.Do("ZRANGEBYSCORE", rds.KeyPeriodicPolicy(bs.namespace), numericID, numericID))
	if err != nil {
		return err
	}

	p := &Policy{}
	if len(bytes) > 0 {
		if rawPolicy, ok := bytes[0].([]byte); ok {
			if err := p.DeSerialize(rawPolicy); err != nil {
				return err
			}
		}
	}

	if utils.IsEmptyStr(p.ID) {
		// Deserialization failed
		return errors.Errorf("no valid periodic job policy found: %s:%d", policyID, numericID)
	}

	notification := &message{
		Event: changeEventUnSchedule,
		Data:  p,
	}

	msgJSON, err := json.Marshal(notification)
	if err != nil {
		return err
	}

	// Remove from the redis db and publish the notification via a redis transaction
	err = conn.Send("MULTI")
	err = conn.Send("ZREMRANGEBYSCORE", rds.KeyPeriodicPolicy(bs.namespace), numericID, numericID) // Accurately remove the item with the specified score
	err = conn.Send("PUBLISH", rds.KeyPeriodicNotification(bs.namespace), msgJSON)
	if err != nil {
		return err
	}
	_, err = conn.Do("EXEC")
	if err != nil {
		return err
	}

	// Expire the periodic job stats
	if err := tracker.Expire(); err != nil {
		logger.Error(err)
	}

	// Switch the job stats to stopped
	// Should not block the next clear action
	err = tracker.Stop()

	// Get the downstream executions of the periodic job
	// and clear these executions.
	// This is a best-effort action; its failure will not cause the unschedule action to fail.
	// Errors are only logged here.
	eKey := rds.KeyUpstreamJobAndExecutions(bs.namespace, policyID)
	if eIDs, err := getPeriodicExecutions(conn, eKey); err != nil {
		logger.Errorf("Get executions for periodic job %s error: %s", policyID, err)
	} else {
		if len(eIDs) == 0 {
			logger.Debugf("no stopped executions: %s", policyID)
		}
		for _, eID := range eIDs {
			eTracker, err := bs.ctl.Track(eID)
			if err != nil {
				logger.Errorf("Track execution %s error: %s", eID, err)
				continue
			}

			e := eTracker.Job()
			// Only the pending and running ones need to be handled
			// Do the clearing
			if job.ScheduledStatus == job.Status(e.Info.Status) {
				// Please pay attention here: the job ID used in the scheduled job queue is
				// the ID of the periodic job (policy).
				if err := bs.client.DeleteScheduledJob(e.Info.RunAt, policyID); err != nil {
					logger.Errorf("Delete scheduled job %s error: %s", eID, err)
				}
			}

			// Mark the job status as stopped to block execution.
			// The executions here should not be in the final states;
			// double confirmation: only stop the ones not yet in a final state.
			if job.RunningStatus.Compare(job.Status(e.Info.Status)) >= 0 {
				if err := eTracker.Stop(); err != nil {
					logger.Errorf("Stop execution %s error: %s", eID, err)
				}
			}
		}
	}

	return err
}

// Clear all the dirty jobs.
// A scheduled job is treated as dirty only if its enqueued timestamp is older
// than the enqueuer horizon.
// This is a best-effort action.
func (bs *basicScheduler) clearDirtyJobs() {
	conn := bs.pool.Get()
	defer func() {
		_ = conn.Close()
	}()

	nowEpoch := time.Now().Unix()
	scope := nowEpoch - int64(enqueuerHorizon/time.Minute)*60

	jobScores, err := rds.GetZsetByScore(conn, rds.RedisKeyScheduled(bs.namespace), []int64{0, scope})
	if err != nil {
		logger.Errorf("Get dirty jobs error: %s", err)
		return
	}

	for _, jobScore := range jobScores {
		j, err := utils.DeSerializeJob(jobScore.JobBytes)
		if err != nil {
			logger.Errorf("Deserialize dirty job error: %s", err)
			continue
		}

		if err = bs.client.DeleteScheduledJob(jobScore.Score, j.ID); err != nil {
			logger.Errorf("Remove dirty scheduled job error: %s", err)
		} else {
			logger.Debugf("Remove dirty scheduled job: %s run at %#v", j.ID, time.Unix(jobScore.Score, 0).String())
		}
	}
}
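
// Horizon sketch: with enqueuerHorizon = 4 * time.Minute, the dirty scope is
// everything scheduled to run more than 240 seconds in the past:
//
//	scope := now - (4m / 1m) * 60 // == now - 240
//	rds.GetZsetByScore(conn, scheduledKey, []int64{0, scope})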

// Get the relevant executions for the periodic job
func getPeriodicExecutions(conn redis.Conn, key string) ([]string, error) {
	args := []interface{}{key, 0, "+inf"}

	list, err := redis.Values(conn.Do("ZRANGEBYSCORE", args...))
	if err != nil {
		return nil, err
	}

	results := make([]string, 0)
	for _, item := range list {
		if eID, ok := item.([]byte); ok {
			results = append(results, string(eID))
		}
	}

	return results, nil
}
135
src/jobservice/period/basic_scheduler_test.go
Normal file
@ -0,0 +1,135 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package period

import (
    "context"
    "fmt"
    "github.com/goharbor/harbor/src/jobservice/common/utils"
    "github.com/goharbor/harbor/src/jobservice/env"
    "github.com/goharbor/harbor/src/jobservice/job"
    "github.com/goharbor/harbor/src/jobservice/lcm"
    "github.com/goharbor/harbor/src/jobservice/tests"
    "github.com/gomodule/redigo/redis"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "github.com/stretchr/testify/suite"
    "sync"
    "testing"
    "time"
)

// BasicSchedulerTestSuite tests functions of basic scheduler
type BasicSchedulerTestSuite struct {
    suite.Suite

    cancel    context.CancelFunc
    namespace string
    pool      *redis.Pool

    lcmCtl    lcm.Controller
    scheduler Scheduler
}

// SetupSuite prepares the test suite
func (suite *BasicSchedulerTestSuite) SetupSuite() {
    ctx, cancel := context.WithCancel(context.WithValue(context.Background(), utils.NodeID, "fake_node_ID"))
    suite.cancel = cancel

    suite.namespace = tests.GiveMeTestNamespace()
    suite.pool = tests.GiveMeRedisPool()

    envCtx := &env.Context{
        SystemContext: ctx,
        WG:            new(sync.WaitGroup),
    }

    suite.lcmCtl = lcm.NewController(
        envCtx,
        suite.namespace,
        suite.pool,
        func(hookURL string, change *job.StatusChange) error { return nil },
    )

    suite.scheduler = NewScheduler(ctx, suite.namespace, suite.pool, suite.lcmCtl)
}

// TearDownSuite clears the test suite
func (suite *BasicSchedulerTestSuite) TearDownSuite() {
    suite.cancel()

    conn := suite.pool.Get()
    defer func() {
        _ = conn.Close()
    }()

    _ = tests.ClearAll(suite.namespace, conn)
}

// TestSchedulerTestSuite is entry of go test
func TestSchedulerTestSuite(t *testing.T) {
    suite.Run(t, new(BasicSchedulerTestSuite))
}

// TestScheduler tests scheduling and un-scheduling
func (suite *BasicSchedulerTestSuite) TestScheduler() {
    go func() {
        <-time.After(1 * time.Second)
        _ = suite.scheduler.Stop()
    }()

    go func() {
        var err error
        defer func() {
            require.NoError(suite.T(), err, "start scheduler: nil error expected but got %s", err)
        }()

        err = suite.scheduler.Start()
    }()

    // Prepare one
    now := time.Now()
    minute := now.Minute()
    if minute+2 >= 60 {
        minute = minute - 2
    }
    coreSpec := fmt.Sprintf("30,50 %d * * * *", minute+2)
    p := &Policy{
        ID:       "fake_policy",
        JobName:  job.SampleJob,
        CronSpec: coreSpec,
    }

    pid, err := suite.scheduler.Schedule(p)
    require.NoError(suite.T(), err, "schedule: nil error expected but got %s", err)
    assert.Condition(suite.T(), func() bool {
        return pid > 0
    }, "schedule: returned pid should >0")

    jobStats := &job.Stats{
        Info: &job.StatsInfo{
            JobID:      p.ID,
            Status:     job.ScheduledStatus.String(),
            JobName:    job.SampleJob,
            JobKind:    job.KindPeriodic,
            NumericPID: pid,
            CronSpec:   coreSpec,
        },
    }
    _, err = suite.lcmCtl.New(jobStats)
    require.NoError(suite.T(), err, "lcm new: nil error expected but got %s", err)

    err = suite.scheduler.UnSchedule(p.ID)
    require.NoError(suite.T(), err, "unschedule: nil error expected but got %s", err)
}
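A note on the spec built above: the robfig/cron parser used throughout jobservice accepts six fields with seconds first, so "30,50 %d * * * *" fires at seconds 30 and 50 of the chosen minute of every hour. A minimal sketch (the spec and times here are assumptions for illustration):

    schedule, err := cron.Parse("30,50 17 * * * *") // seconds 30 and 50 of minute 17, every hour
    if err != nil {
        panic(err) // the spec above is well-formed, so this should not happen
    }
    next := schedule.Next(time.Now())
    fmt.Println("first trigger:", next)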
@@ -15,272 +15,304 @@
package period

import (
    "errors"
    "fmt"
    "math/rand"
    "time"

    "context"
    "github.com/gocraft/work"
    "github.com/goharbor/harbor/src/jobservice/common/rds"
    "github.com/goharbor/harbor/src/jobservice/common/utils"
    "github.com/goharbor/harbor/src/jobservice/job"
    "github.com/goharbor/harbor/src/jobservice/lcm"
    "github.com/goharbor/harbor/src/jobservice/logger"
    "github.com/goharbor/harbor/src/jobservice/models"
    "github.com/goharbor/harbor/src/jobservice/opm"
    "github.com/goharbor/harbor/src/jobservice/utils"
    "github.com/gomodule/redigo/redis"
    "github.com/robfig/cron"
)

const (
    periodicEnqueuerSleep   = 2 * time.Minute
    periodicEnqueuerHorizon = 4 * time.Minute
    enqueuerSleep   = 2 * time.Minute
    enqueuerHorizon = 4 * time.Minute
    neverExecuted   = 365 * 24 * time.Hour

    // PeriodicExecutionMark marks the scheduled job to a periodic execution
    PeriodicExecutionMark = "_job_kind_periodic_"
)

type periodicEnqueuer struct {
    namespace        string
    pool             *redis.Pool
    policyStore      *periodicJobPolicyStore
    stopChan         chan struct{}
    doneStoppingChan chan struct{}
    statsManager     opm.JobStatsManager
    identity         string
type enqueuer struct {
    namespace   string
    context     context.Context
    pool        *redis.Pool
    policyStore *policyStore
    ctl         lcm.Controller
    // Diff with other nodes
    nodeID string
    // Track the error of enqueuing
    lastEnqueueErr error
    // For stop
    stopChan chan bool
}

func newPeriodicEnqueuer(namespace string, pool *redis.Pool, policyStore *periodicJobPolicyStore, statsManager opm.JobStatsManager) *periodicEnqueuer {
    return &periodicEnqueuer{
        namespace:        namespace,
        pool:             pool,
        policyStore:      policyStore,
        statsManager:     statsManager,
        stopChan:         make(chan struct{}),
        doneStoppingChan: make(chan struct{}),
        identity:         utils.MakeIdentifier(), // Currently, use a generated ID
func newEnqueuer(ctx context.Context, namespace string, pool *redis.Pool, ctl lcm.Controller) *enqueuer {
    nodeID := ctx.Value(utils.NodeID)
    if nodeID == nil {
        // Must be failed
        panic("missing node ID in the system context of periodic enqueuer")
    }

    return &enqueuer{
        context:     ctx,
        namespace:   namespace,
        pool:        pool,
        policyStore: newPolicyStore(ctx, namespace, pool),
        ctl:         ctl,
        stopChan:    make(chan bool, 1),
        nodeID:      nodeID.(string),
    }
}

func (pe *periodicEnqueuer) start() {
    go pe.loop()
// Blocking call
func (e *enqueuer) start() error {
    // Load policies first when starting
    if err := e.policyStore.load(); err != nil {
        return err
    }

    go e.loop()
    logger.Info("Periodic enqueuer is started")

    return e.policyStore.serve()
}

func (pe *periodicEnqueuer) stop() {
    pe.stopChan <- struct{}{}
    <-pe.doneStoppingChan
}

func (pe *periodicEnqueuer) loop() {
func (e *enqueuer) loop() {
    defer func() {
        logger.Info("Periodic enqueuer is stopped")
    }()
    // Begin reaping periodically
    timer := time.NewTimer(periodicEnqueuerSleep + time.Duration(rand.Intn(30))*time.Second)
    defer timer.Stop()

    if pe.shouldEnqueue() {
        err := pe.enqueue()
        if err != nil {
            logger.Errorf("periodic_enqueuer.loop.enqueue:%s\n", err)
        }
    } else {
        logger.Debug("Enqueue condition not matched, do nothing.")
    }
    // Do enqueue immediately when starting
    isHit := e.checkAndEnqueue()

    // Begin reaping periodically
    timer := time.NewTimer(e.nextTurn(isHit, e.lastEnqueueErr != nil))
    defer timer.Stop()

    for {
        select {
        case <-pe.stopChan:
            pe.doneStoppingChan <- struct{}{}
        case <-e.stopChan:
            // Stop policy store now
            e.policyStore.stopChan <- true
            return
        case <-timer.C:
            timer.Reset(periodicEnqueuerSleep + time.Duration(rand.Intn(30))*time.Second)
            if pe.shouldEnqueue() {
                err := pe.enqueue()
                if err != nil {
                    logger.Errorf("periodic_enqueuer.loop.enqueue:%s\n", err)
                }
            } else {
                logger.Debug("Enqueue condition not matched, do nothing.")
            }
            // Pause the timer while completing this round of processing
            timer.Reset(neverExecuted)

            // Check and enqueue.
            // Set next turn with lower priority to balance workload with long
            // round time if it hits.
            isHit = e.checkAndEnqueue()
            timer.Reset(e.nextTurn(isHit, e.lastEnqueueErr != nil))
        }
    }
}

func (pe *periodicEnqueuer) enqueue() error {
    now := time.Now().Unix()

    logger.Debugf("Periodic enqueuing loop by enqueuer %s: %d", pe.identity, now)

    conn := pe.pool.Get()
    defer conn.Close()

    // Set last periodic enqueue timestamp in advance to avoid duplicated enqueue actions
    if _, err := conn.Do("SET", utils.RedisKeyLastPeriodicEnqueue(pe.namespace), now); err != nil {
        return err
// checkAndEnqueue checks if it should do enqueue and
// does enqueue when condition hit.
func (e *enqueuer) checkAndEnqueue() (isHit bool) {
    if isHit = e.shouldEnqueue(); isHit {
        e.enqueue()
    }

    // Avoid scheduling at the same time.
    lockKey := fmt.Sprintf("%s:%s", utils.KeyPeriod(pe.namespace), "lock")
    return
}

    // Use separate conn for the locker
    lockConn := pe.pool.Get()
    defer lockConn.Close()
// nextTurn returns the next check time slot by applying
// priorities to balance the workloads across multiple nodes
func (e *enqueuer) nextTurn(isHit bool, enqErr bool) time.Duration {
    base := enqueuerSleep

    // Acquire a locker with 30s expiring time
    if err := acquireLock(lockConn, lockKey, pe.identity, 30); err != nil {
        return err
    }
    logger.Debugf("Periodic enqueuer %s acquires lock", pe.identity)

    defer func() {
        if err := releaseLock(lockConn, lockKey, pe.identity); err != nil {
            logger.Errorf("Periodic enqueuer %s releases lock failed: %s", pe.identity, err)
        } else {
            logger.Debugf("Periodic enqueuer %s releases lock", pe.identity)
    if isHit {
        // Down the hit priority by adding more waiting time
        base = base + time.Duration(3)*time.Second
        if enqErr {
            // Downgrade the priority if the node encountered an error when enqueuing
            base = base + time.Duration(5)*time.Second
        }
    } else {
        // Upgrade the priority of hitting in the next turn
        base = base - time.Duration(3)*time.Second
    }

    // Add random waiting time [0,5)
    base = base + time.Duration(rand.Intn(5))*time.Second

    return base
}

func (e *enqueuer) enqueue() {
    conn := e.pool.Get()
    defer func() {
        _ = conn.Close()
    }()

    nowTime := time.Unix(now, 0)
    horizon := nowTime.Add(periodicEnqueuerHorizon)
    // Reset error track
    e.lastEnqueueErr = nil

    for _, pl := range pe.policyStore.list() {
        schedule, err := cron.Parse(pl.CronSpec)
        if err != nil {
            // The cron spec should already be checked at upper components.
            // Just in case an error occurs here, ignore it
            logger.Errorf("[Ignore] Invalid corn spec in periodic policy %s %s: %s", pl.JobName, pl.PolicyID, err)
            continue
    e.policyStore.Iterate(func(id string, p *Policy) bool {
        e.scheduleNextJobs(p, conn)
        return true
    })
}

// scheduleNextJobs schedules job for next time slots based on the policy
func (e *enqueuer) scheduleNextJobs(p *Policy, conn redis.Conn) {
    nowTime := time.Unix(time.Now().Unix(), 0)
    horizon := nowTime.Add(enqueuerHorizon)

    schedule, err := cron.Parse(p.CronSpec)
    if err != nil {
        // The cron spec should already be checked at upper layers.
        // Just in case an error occurs here, ignore it
        e.lastEnqueueErr = err
        logger.Errorf("Invalid corn spec in periodic policy %s %s: %s", p.JobName, p.ID, err)
    } else {
        if p.JobParameters == nil {
            p.JobParameters = make(job.Parameters)
        }

        executions := []string{}
        // Clone job parameters
        wJobParams := make(job.Parameters)
        if p.JobParameters != nil && len(p.JobParameters) > 0 {
            for k, v := range p.JobParameters {
                wJobParams[k] = v
            }
        }
        // Add extra argument for job running
        // Note: for system use only
        wJobParams[PeriodicExecutionMark] = true
        for t := schedule.Next(nowTime); t.Before(horizon); t = schedule.Next(t) {
            epoch := t.Unix()
            scheduledExecutionID := utils.MakeIdentifier()
            executions = append(executions, scheduledExecutionID)

            // Create an execution (job) based on the periodic job template (policy)
            job := &work.Job{
                Name: pl.JobName,
                ID:   scheduledExecutionID,
            j := &work.Job{
                Name: p.JobName,
                ID:   p.ID, // Use the ID of policy to avoid scheduling duplicated periodic job executions.

                // This is technically wrong, but this lets the bytes be identical for the same periodic job instance.
                // If we don't do this, we'd need to use a different approach -- probably giving each periodic job its own
                // history of the past 100 periodic jobs, and only scheduling a job if it's not in the history.
                EnqueuedAt: epoch,
                Args:       pl.JobParameters, // Pass parameters to scheduled job here
                // Pass parameters to scheduled job here
                Args: wJobParams,
            }

            rawJSON, err := utils.SerializeJob(job)
            rawJSON, err := utils.SerializeJob(j)
            if err != nil {
                return err
                e.lastEnqueueErr = err
                // Actually this error should not happen if the object struct is well defined
                logger.Errorf("Serialize job object for periodic job %s error: %s", p.ID, err)
                break
            }

            // Place the time slots for the job (policy)
            // If the slot is already there, an error will be returned.
            expireTime := (epoch - nowTime.Unix()) + 5
            slot := fmt.Sprintf("%s:%s@%d", utils.KeyPeriodicJobTimeSlots(pe.namespace), pl.PolicyID, epoch)
            if err := placeSlot(conn, slot, epoch, expireTime); err != nil {
                // Logged and continue
                logger.Errorf("Failed to place time slot '%s@%d' in enqueuer %s: %s", pl.PolicyID, epoch, pe.identity, err)
                continue
            }

            _, err = conn.Do("ZADD", utils.RedisKeyScheduled(pe.namespace), epoch, rawJSON)
            // Persist the execution first.
            // Note that the job has not been really scheduled yet.
            // If persisting the job data fails, the job schedule should be abandoned.
            execution := e.createExecution(p, epoch)
            eTracker, err := e.ctl.New(execution)
            if err != nil {
                return err
                e.lastEnqueueErr = err
                logger.Errorf("Save stats data of job execution '%s' error: %s", execution.Info.JobID, err)
                break
            }

            logger.Infof("Schedule job %s:%s for policy %s at %d by enqueuer %s", job.Name, job.ID, pl.PolicyID, epoch, pe.identity)
            // Put job to the scheduled job queue
            _, err = conn.Do("ZADD", rds.RedisKeyScheduled(e.namespace), epoch, rawJSON)
            if err != nil {
                e.lastEnqueueErr = err
                logger.Errorf("Put the execution of the periodic job '%s' to the scheduled job queue error: %s", p.ID, err)

            // Try to save the stats of new scheduled execution (job).
            pe.createExecution(pl.PolicyID, pl.JobName, scheduledExecutionID, epoch)

            // Get web hook from the periodic job (policy)
            webHookURL, err := pe.statsManager.GetHook(pl.PolicyID)
            if err == nil {
                // Register hook for the execution
                if err := pe.statsManager.RegisterHook(scheduledExecutionID, webHookURL, false); err != nil {
                    // Just logged
                    logger.Errorf("Failed to register web hook '%s' for periodic job (execution) '%s' with error by enqueuer %s: %s", webHookURL, scheduledExecutionID, pe.identity, err)
                // Mark job status to be error
                // If this happened, the job stats is definitely becoming dirty data at the job service side.
                // For the consumer side, retrying the web hook may fix the problem.
                if err := eTracker.Fail(); err != nil {
                    e.lastEnqueueErr = err
                    logger.Errorf("Mark execution '%s' to failure status error: %s", execution.Info.JobID, err)
                }
            } else {
                // Just a warning
                logger.Warningf("Failed to retrieve web hook for periodic job (policy) %s by enqueuer %s: %s", pl.PolicyID, pe.identity, err)
            }
        }
        // Link the upstream job (policy) with the created executions
        if len(executions) > 0 {
            if err := pe.statsManager.AttachExecution(pl.PolicyID, executions...); err != nil {
                // Just logged it
                logger.Errorf("Link upstream job with executions failed in enqueuer %s: %s", pe.identity, err)
            }
        }
        // Directly use redis conn to update the periodic job (policy) status
        // Do not care about the result
        conn.Do("HMSET", utils.KeyJobStats(pe.namespace, pl.PolicyID), "status", job.JobStatusScheduled, "update_time", time.Now().Unix())
    }

    return nil
                break // Probably redis connection is broken
            }

            logger.Debugf("Scheduled execution for periodic job %s:%s at %d", j.Name, p.ID, epoch)
        }
    }
}

func (pe *periodicEnqueuer) createExecution(upstreamJobID, upstreamJobName, executionID string, runAt int64) {
    execution := models.JobStats{
        Stats: &models.JobStatData{
            JobID:       executionID,
            JobName:     upstreamJobName,
            Status:      job.JobStatusPending,
            JobKind:     job.JobKindScheduled,
            EnqueueTime: time.Now().Unix(),
            UpdateTime:  time.Now().Unix(),
            RefLink:     fmt.Sprintf("/api/v1/jobs/%s", executionID),
// createExecution creates execution object
func (e *enqueuer) createExecution(p *Policy, runAt int64) *job.Stats {
    eID := fmt.Sprintf("%s@%d", p.ID, runAt)

    return &job.Stats{
        Info: &job.StatsInfo{
            JobID:         eID,
            JobName:       p.JobName,
            WebHookURL:    p.WebHookURL,
            CronSpec:      p.CronSpec,
            UpstreamJobID: p.ID,
            RunAt:         runAt,
            UpstreamJobID: upstreamJobID,
            Status:        job.ScheduledStatus.String(),
            JobKind:       job.KindScheduled, // For periodic job execution, it should be set to 'scheduled'
            EnqueueTime:   time.Now().Unix(),
            RefLink:       fmt.Sprintf("/api/v1/jobs/%s", eID),
            Parameters:    p.JobParameters,
        },
    }

    pe.statsManager.Save(execution)
}

func (pe *periodicEnqueuer) shouldEnqueue() bool {
    conn := pe.pool.Get()
    defer conn.Close()
func (e *enqueuer) shouldEnqueue() bool {
    conn := e.pool.Get()
    defer func() {
        _ = conn.Close()
    }()

    lastEnqueue, err := redis.Int64(conn.Do("GET", utils.RedisKeyLastPeriodicEnqueue(pe.namespace)))
    if err == redis.ErrNil {
        return true
    } else if err != nil {
        logger.Errorf("periodic_enqueuer.should_enqueue:%s\n", err)
    // Acquire a lock before doing the checking
    // If it fails, directly return false.
    lockKey := rds.KeyPeriodicLock(e.namespace)
    if err := rds.AcquireLock(conn, lockKey, e.nodeID, 30); err != nil {
        logger.Errorf("acquire lock for periodic enqueuing error: %s", err)
        return false
    }
    // Acquired lock
    // For lock releasing
    defer func() {
        if err := rds.ReleaseLock(conn, lockKey, e.nodeID); err != nil {
            logger.Errorf("release lock for periodic enqueuing error: %s", err)
        }
    }()

    shouldEnq := false
    lastEnqueue, err := redis.Int64(conn.Do("GET", rds.RedisKeyLastPeriodicEnqueue(e.namespace)))
    if err != nil {
        if err.Error() != redis.ErrNil.Error() {
            // Logged error
            logger.Errorf("get timestamp of last enqueue error: %s", err)
        }

        // Should enqueue
        shouldEnq = true
    } else {
        // Check further condition
        shouldEnq = lastEnqueue < (time.Now().Unix() - int64(enqueuerSleep/time.Minute)*60)
    }

    if shouldEnq {
        // Set last periodic enqueue timestamp
        if _, err := conn.Do("SET", rds.RedisKeyLastPeriodicEnqueue(e.namespace), time.Now().Unix()); err != nil {
            logger.Errorf("set last periodic enqueue timestamp error: %s", err)
        }

        // Anyway the action should be enforced
        // The negative effect of this failure is just more re-enqueues by other nodes
        return true
    }

    return lastEnqueue < (time.Now().Unix() - int64(periodicEnqueuerSleep/time.Minute))
}

func placeSlot(conn redis.Conn, key string, value interface{}, expireTime int64) error {
    args := []interface{}{key, value, "NX", "EX", expireTime}
    res, err := conn.Do("SET", args...)
    if err != nil {
        return err
    }
    // Already existing, the value cannot be overridden
    if res == nil {
        return fmt.Errorf("key %s is already set with value %v", key, value)
    }

    return nil
}

func acquireLock(conn redis.Conn, lockerKey string, lockerID string, expireTime int64) error {
    return placeSlot(conn, lockerKey, lockerID, expireTime)
}

func releaseLock(conn redis.Conn, lockerKey string, lockerID string) error {
    theID, err := redis.String(conn.Do("GET", lockerKey))
    if err != nil {
        return err
    }

    if theID == lockerID {
        _, err := conn.Do("DEL", lockerKey)
        return err
    }

    return errors.New("locker ID mismatch")
    return false
}
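A back-of-envelope restatement of the nextTurn policy above, useful for eyeballing the resulting delays; the jitter bound follows the code (rand.Intn(5)), and the constants mirror those in the diff:

    package main

    import (
        "fmt"
        "time"
    )

    // Stand-alone sketch of enqueuer.nextTurn for inspection only.
    func nextTurn(isHit, enqErr bool, jitter time.Duration) time.Duration {
        base := 2 * time.Minute
        if isHit {
            base += 3 * time.Second
            if enqErr {
                base += 5 * time.Second
            }
        } else {
            base -= 3 * time.Second
        }
        return base + jitter
    }

    func main() {
        fmt.Println(nextTurn(false, false, 0)) // 1m57s: idle nodes check sooner
        fmt.Println(nextTurn(true, false, 0))  // 2m3s: the winning node backs off
        fmt.Println(nextTurn(true, true, 0))   // 2m8s: errors back off further
    }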
@@ -15,63 +15,120 @@ package period

import (
    "context"
    "fmt"
    "github.com/goharbor/harbor/src/jobservice/common/rds"
    "github.com/goharbor/harbor/src/jobservice/common/utils"
    "github.com/goharbor/harbor/src/jobservice/env"
    "github.com/goharbor/harbor/src/jobservice/job"
    "github.com/goharbor/harbor/src/jobservice/lcm"
    "github.com/goharbor/harbor/src/jobservice/tests"
    "github.com/gomodule/redigo/redis"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "github.com/stretchr/testify/suite"
    "sync"
    "testing"
    "time"

    "github.com/goharbor/harbor/src/jobservice/opm"

    "github.com/goharbor/harbor/src/jobservice/tests"
    "github.com/goharbor/harbor/src/jobservice/utils"
)

func TestPeriodicEnqueuerStartStop(t *testing.T) {
    ns := tests.GiveMeTestNamespace()
    ps := &periodicJobPolicyStore{
        lock:     new(sync.RWMutex),
        policies: make(map[string]*PeriodicJobPolicy),
    }
    enqueuer := newPeriodicEnqueuer(ns, redisPool, ps, nil)
    enqueuer.start()
    <-time.After(100 * time.Millisecond)
    enqueuer.stop()
// EnqueuerTestSuite tests functions of enqueuer
type EnqueuerTestSuite struct {
    suite.Suite

    enqueuer  *enqueuer
    namespace string
    pool      *redis.Pool
    cancel    context.CancelFunc
}

func TestEnqueue(t *testing.T) {
    ns := tests.GiveMeTestNamespace()

    pl := &PeriodicJobPolicy{
        PolicyID: "fake_ID",
        JobName:  "fake_name",
        CronSpec: "5 * * * * *",
    }
    ps := &periodicJobPolicyStore{
        lock:     new(sync.RWMutex),
        policies: make(map[string]*PeriodicJobPolicy),
    }
    ps.add(pl)

    statsManager := opm.NewRedisJobStatsManager(context.Background(), ns, redisPool)
    statsManager.Start()
    defer statsManager.Shutdown()

    enqueuer := newPeriodicEnqueuer(ns, redisPool, ps, statsManager)
    if err := enqueuer.enqueue(); err != nil {
        t.Error(err)
    }

    if err := clear(ns); err != nil {
        t.Error(err)
    }
// TestEnqueuerTestSuite is entry of go test
func TestEnqueuerTestSuite(t *testing.T) {
    suite.Run(t, new(EnqueuerTestSuite))
}

func clear(ns string) error {
    err := tests.Clear(utils.RedisKeyScheduled(ns), redisPool.Get())
    err = tests.Clear(utils.KeyJobStats(ns, "fake_ID"), redisPool.Get())
    err = tests.Clear(utils.RedisKeyLastPeriodicEnqueue(ns), redisPool.Get())
    if err != nil {
        return err
// SetupSuite prepares the test suite
func (suite *EnqueuerTestSuite) SetupSuite() {
    suite.namespace = tests.GiveMeTestNamespace()
    suite.pool = tests.GiveMeRedisPool()

    ctx, cancel := context.WithCancel(context.WithValue(context.Background(), utils.NodeID, "fake_node_ID"))
    suite.cancel = cancel

    envCtx := &env.Context{
        SystemContext: ctx,
        WG:            new(sync.WaitGroup),
    }

    return nil
    lcmCtl := lcm.NewController(
        envCtx,
        suite.namespace,
        suite.pool,
        func(hookURL string, change *job.StatusChange) error { return nil },
    )
    suite.enqueuer = newEnqueuer(ctx, suite.namespace, suite.pool, lcmCtl)

    suite.prepare()
}

// TearDownSuite clears the test suite
func (suite *EnqueuerTestSuite) TearDownSuite() {
    suite.cancel()

    conn := suite.pool.Get()
    defer func() {
        _ = conn.Close()
    }()

    _ = tests.ClearAll(suite.namespace, conn)
}

// TestEnqueuer tests enqueuer
func (suite *EnqueuerTestSuite) TestEnqueuer() {
    go func() {
        defer func() {
            suite.enqueuer.stopChan <- true
        }()

        <-time.After(1 * time.Second)

        key := rds.RedisKeyScheduled(suite.namespace)
        conn := suite.pool.Get()
        defer func() {
            _ = conn.Close()
        }()

        count, err := redis.Int(conn.Do("ZCARD", key))
        require.Nil(suite.T(), err, "count scheduled: nil error expected but got %s", err)
        assert.Condition(suite.T(), func() bool {
            return count > 0
        }, "count of scheduled jobs should be greater than 0 but got %d", count)
    }()

    err := suite.enqueuer.start()
    require.Nil(suite.T(), err, "enqueuer start: nil error expected but got %s", err)
}

func (suite *EnqueuerTestSuite) prepare() {
    now := time.Now()
    minute := now.Minute()

    coreSpec := fmt.Sprintf("30,50 %d * * * *", minute+2)

    // Prepare one
    p := &Policy{
        ID:       "fake_policy",
        JobName:  job.SampleJob,
        CronSpec: coreSpec,
    }
    rawData, err := p.Serialize()
    assert.Nil(suite.T(), err, "prepare data: nil error expected but got %s", err)
    key := rds.KeyPeriodicPolicy(suite.namespace)

    conn := suite.pool.Get()
    defer func() {
        _ = conn.Close()
    }()

    _, err = conn.Do("ZADD", key, time.Now().Unix(), rawData)
    assert.Nil(suite.T(), err, "prepare policy: nil error expected but got %s", err)
}
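The prepare step above is the only coupling between the test and the enqueuer: policies live in a Redis ZSET that load() reads at start-up. A condensed sketch of that seeding, with conn and ns assumed to be set up as in the suite:

    p := &Policy{ID: "fake_policy", JobName: job.SampleJob, CronSpec: "30,50 10 * * * *"}
    raw, err := p.Serialize()
    if err != nil {
        // handle serialization error
    }
    // The score is a timestamp; load() replays the whole ZSET with ZRANGE 0 -1.
    if _, err := conn.Do("ZADD", rds.KeyPeriodicPolicy(ns), time.Now().Unix(), raw); err != nil {
        // handle redis error
    }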
@@ -1,71 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package period

import "github.com/goharbor/harbor/src/jobservice/models"

// Interface defines operations the periodic scheduler should have.
type Interface interface {
    // Schedule the specified cron job policy.
    //
    // jobName string           : The name of the periodical job
    // params models.Parameters : The parameters required by the periodical job
    // cronSpec string          : The periodical settings in cron format
    //
    // Returns:
    //  The uuid of the cron job policy
    //  The latest next trigger time
    //  error if failed to schedule
    Schedule(jobName string, params models.Parameters, cronSpec string) (string, int64, error)

    // Unschedule the specified cron job policy.
    //
    // cronJobPolicyID string: The ID of the cron job policy.
    //
    // Return:
    //  error if failed to unschedule
    UnSchedule(cronJobPolicyID string) error

    // Load and cache data if needed
    //
    // Return:
    //  error if failed to do
    Load() error

    // Clear all the cron job policies.
    //
    // Return:
    //  error if failed to do
    Clear() error

    // Start to serve
    Start()

    // Accept the pushed policy and cache it
    //
    // policy *PeriodicJobPolicy : the periodic policy being accepted
    //
    // Return:
    //  error if failed to do
    AcceptPeriodicPolicy(policy *PeriodicJobPolicy) error

    // Remove the specified policy from the cache if it exists
    //
    // policyID string : ID of the policy being removed
    //
    // Return:
    //  the pointer of the policy being deleted
    RemovePeriodicPolicy(policyID string) *PeriodicJobPolicy
}
@@ -1,118 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package period

import (
    "encoding/json"
    "sync"

    "github.com/goharbor/harbor/src/jobservice/utils"
)

const (
    // periodicJobPolicyChangeEventSchedule : Schedule periodic job policy event
    periodicJobPolicyChangeEventSchedule = "Schedule"
    // periodicJobPolicyChangeEventUnSchedule : UnSchedule periodic job policy event
    periodicJobPolicyChangeEventUnSchedule = "UnSchedule"
)

// PeriodicJobPolicy ...
type PeriodicJobPolicy struct {
    // NOTES: The 'PolicyID' should not be set when serializing this policy struct to the zset,
    // because each 'Policy ID' is different and it may break the unique capability of the zset.
    PolicyID      string                 `json:"policy_id,omitempty"`
    JobName       string                 `json:"job_name"`
    JobParameters map[string]interface{} `json:"job_params"`
    CronSpec      string                 `json:"cron_spec"`
}

// Serialize the policy to raw data.
func (pjp *PeriodicJobPolicy) Serialize() ([]byte, error) {
    return json.Marshal(pjp)
}

// DeSerialize the raw json to policy.
func (pjp *PeriodicJobPolicy) DeSerialize(rawJSON []byte) error {
    return json.Unmarshal(rawJSON, pjp)
}

// periodicJobPolicyStore is in-memory cache for the periodic job policies.
type periodicJobPolicyStore struct {
    lock     *sync.RWMutex
    policies map[string]*PeriodicJobPolicy // k-v pair and key is the policy ID
}

func (ps *periodicJobPolicyStore) addAll(items []*PeriodicJobPolicy) {
    if items == nil || len(items) == 0 {
        return
    }

    ps.lock.Lock()
    defer ps.lock.Unlock()

    for _, item := range items {
        // Ignore the item with empty uuid
        if !utils.IsEmptyStr(item.PolicyID) {
            ps.policies[item.PolicyID] = item
        }
    }
}

func (ps *periodicJobPolicyStore) list() []*PeriodicJobPolicy {
    allItems := make([]*PeriodicJobPolicy, 0)

    ps.lock.RLock()
    defer ps.lock.RUnlock()

    for _, v := range ps.policies {
        allItems = append(allItems, v)
    }

    return allItems
}

func (ps *periodicJobPolicyStore) add(jobPolicy *PeriodicJobPolicy) {
    if jobPolicy == nil || utils.IsEmptyStr(jobPolicy.PolicyID) {
        return
    }

    ps.lock.Lock()
    defer ps.lock.Unlock()

    ps.policies[jobPolicy.PolicyID] = jobPolicy
}

func (ps *periodicJobPolicyStore) remove(policyID string) *PeriodicJobPolicy {
    if utils.IsEmptyStr(policyID) {
        return nil
    }

    ps.lock.Lock()
    defer ps.lock.Unlock()

    if item, ok := ps.policies[policyID]; ok {
        delete(ps.policies, policyID)
        return item
    }

    return nil
}

func (ps *periodicJobPolicyStore) size() int {
    ps.lock.RLock()
    defer ps.lock.RUnlock()

    return len(ps.policies)
}
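Worth noting when comparing with the new policy_store.go later in this diff: the RWMutex-guarded map above is replaced by a sync.Map, whose LoadOrStore gives add-if-absent semantics in one call. A small stdlib-only sketch of that behavior:

    var policies sync.Map

    p := &Policy{ID: "fake_policy"}
    existing, loaded := policies.LoadOrStore(p.ID, p)
    fmt.Println(loaded)                // false on first insert, true afterwards
    fmt.Println(existing.(*Policy).ID) // the stored (or pre-existing) policy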
@@ -1,80 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package period

import (
    "fmt"
    "math/rand"
    "sync"
    "testing"
    "time"
)

func TestPeriodicJobPolicy(t *testing.T) {
    p := createPolicy("")

    data, err := p.Serialize()
    if err != nil {
        t.Fatal(err)
    }

    if err := p.DeSerialize(data); err != nil {
        t.Fatal(err)
    }
}

func TestPeriodicJobPolicyStore(t *testing.T) {
    ps := &periodicJobPolicyStore{
        lock:     new(sync.RWMutex),
        policies: make(map[string]*PeriodicJobPolicy),
    }

    ps.add(createPolicy("fake_ID_Steven"))
    if ps.size() != 1 {
        t.Errorf("expect size 1 but got '%d'\n", ps.size())
    }
    pl := make([]*PeriodicJobPolicy, 0)
    pl = append(pl, createPolicy(""))
    pl = append(pl, createPolicy(""))
    ps.addAll(pl)
    if ps.size() != 3 {
        t.Fatalf("expect size 3 but got '%d'\n", ps.size())
    }

    l := ps.list()
    if l == nil || len(l) != 3 {
        t.Fatal("expect a policy list with 3 items but got invalid list")
    }

    rp := ps.remove("fake_ID_Steven")
    if rp == nil {
        t.Fatal("expect none nil policy object but got nil")
    }
}

func createPolicy(id string) *PeriodicJobPolicy {
    theID := id
    if theID == "" {
        theID = fmt.Sprintf("fake_ID_%d", time.Now().UnixNano()+int64(rand.Intn(1000)))
    }
    p := &PeriodicJobPolicy{
        PolicyID:      theID,
        JobName:       "fake_job",
        JobParameters: make(map[string]interface{}),
        CronSpec:      "5 * * * * *",
    }
    p.JobParameters["image"] = "testing:v1"

    return p
}
327 src/jobservice/period/policy_store.go Normal file
@@ -0,0 +1,327 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package period

import (
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "sync"
    "time"

    "github.com/goharbor/harbor/src/jobservice/common/rds"
    "github.com/goharbor/harbor/src/jobservice/common/utils"
    "github.com/goharbor/harbor/src/jobservice/logger"
    "github.com/gomodule/redigo/redis"
    "github.com/robfig/cron"
    "strings"
)

const (
    // changeEventSchedule : Schedule periodic job policy event
    changeEventSchedule = "Schedule"
    // changeEventUnSchedule : UnSchedule periodic job policy event
    changeEventUnSchedule = "UnSchedule"
)

// Policy ...
type Policy struct {
    // Policy can be treated as the job template of a periodic job.
    // The info of the policy will be copied into the scheduled job executions for the periodic job.
    ID            string                 `json:"id"`
    JobName       string                 `json:"job_name"`
    CronSpec      string                 `json:"cron_spec"`
    JobParameters map[string]interface{} `json:"job_params,omitempty"`
    WebHookURL    string                 `json:"web_hook_url,omitempty"`
}

// Serialize the policy to raw data.
func (p *Policy) Serialize() ([]byte, error) {
    return json.Marshal(p)
}

// DeSerialize the raw json to policy.
func (p *Policy) DeSerialize(rawJSON []byte) error {
    return json.Unmarshal(rawJSON, p)
}

// Validate the policy
func (p *Policy) Validate() error {
    if utils.IsEmptyStr(p.ID) {
        return errors.New("missing ID in the periodic job policy object")
    }

    if utils.IsEmptyStr(p.JobName) {
        return errors.New("missing job name in the periodic job policy object")
    }

    if !utils.IsEmptyStr(p.WebHookURL) {
        if !utils.IsValidURL(p.WebHookURL) {
            return fmt.Errorf("bad web hook URL: %s", p.WebHookURL)
        }
    }

    if _, err := cron.Parse(p.CronSpec); err != nil {
        return err
    }

    return nil
}

// policyStore is in-memory cache for the periodic job policies.
type policyStore struct {
    // k-v pair and key is the policy ID
    hash      *sync.Map
    namespace string
    context   context.Context
    pool      *redis.Pool
    // For stop
    stopChan chan bool
}

// message is designed for sub/pub messages
type message struct {
    Event string  `json:"event"`
    Data  *Policy `json:"data"`
}

// newPolicyStore is constructor of policyStore
func newPolicyStore(ctx context.Context, ns string, pool *redis.Pool) *policyStore {
    return &policyStore{
        hash:      new(sync.Map),
        context:   ctx,
        namespace: ns,
        pool:      pool,
        stopChan:  make(chan bool, 1),
    }
}

// Blocking call
func (ps *policyStore) serve() (err error) {
    defer func() {
        logger.Info("Periodical job policy store is stopped")
    }()

    conn := ps.pool.Get()
    psc := redis.PubSubConn{
        Conn: conn,
    }
    defer func() {
        _ = psc.Close()
    }()

    // Subscribe channel
    err = psc.Subscribe(redis.Args{}.AddFlat(rds.KeyPeriodicNotification(ps.namespace))...)
    if err != nil {
        return
    }

    // Channels for sub/pub ctl
    errChan := make(chan error, 1)
    done := make(chan bool, 1)

    go func() {
        for {
            switch res := psc.Receive().(type) {
            case error:
                errChan <- fmt.Errorf("redis sub/pub chan error: %s", res.(error).Error())
                break
            case redis.Message:
                m := &message{}
                if err := json.Unmarshal(res.Data, m); err != nil {
                    // logged
                    logger.Errorf("Read invalid message: %s\n", res.Data)
                    break
                }
                if err := ps.sync(m); err != nil {
                    logger.Error(err)
                }
                break
            case redis.Subscription:
                switch res.Kind {
                case "subscribe":
                    logger.Infof("Subscribe redis channel %s", res.Channel)
                    break
                case "unsubscribe":
                    // Unsubscribed from all channels, which means the main goroutine is exiting
                    logger.Infof("Unsubscribe redis channel %s", res.Channel)
                    done <- true
                    return
                }
            }
        }
    }()

    logger.Info("Periodical job policy store is serving with policy auto sync enabled")
    defer func() {
        var unSubErr error
        defer func() {
            // Merge errors
            finalErrs := make([]string, 0)
            if unSubErr != nil {
                finalErrs = append(finalErrs, unSubErr.Error())
            }
            if err != nil {
                finalErrs = append(finalErrs, err.Error())
            }

            if len(finalErrs) > 0 {
                // Override the returned err or do nothing
                err = errors.New(strings.Join(finalErrs, ";"))
            }
        }()
        // Unsubscribe all
        if err := psc.Unsubscribe(); err != nil {
            logger.Errorf("unsubscribe: %s", err)
        }
        // Confirm result
        // Add timeout in case unsubscribe failed
        select {
        case unSubErr = <-errChan:
            return
        case <-done:
            return
        case <-time.After(30 * time.Second):
            unSubErr = errors.New("unsubscribe time out")
            return
        }
    }()

    ticker := time.NewTicker(time.Minute)
    defer ticker.Stop()

    // blocking here
    for {
        select {
        case <-ticker.C:
            err = psc.Ping("ping!")
            if err != nil {
                return
            }
        case <-ps.stopChan:
            return nil
        case err = <-errChan:
            return
        }
    }
}

// sync policy with backend list
func (ps *policyStore) sync(m *message) error {
    if m == nil {
        return errors.New("nil message")
    }

    if m.Data == nil {
        return errors.New("missing data in the policy sync message")
    }

    switch m.Event {
    case changeEventSchedule:
        if err := ps.add(m.Data); err != nil {
            return fmt.Errorf("failed to sync scheduled policy %s: %s", m.Data.ID, err)
        }
    case changeEventUnSchedule:
        removed := ps.remove(m.Data.ID)
        if removed == nil {
            return fmt.Errorf("failed to sync unscheduled policy %s", m.Data.ID)
        }
    default:
        return fmt.Errorf("message %s is not supported", m.Event)
    }

    return nil
}

// Load all the policies from the backend into the store
func (ps *policyStore) load() error {
    conn := ps.pool.Get()
    defer func() {
        _ = conn.Close()
    }()

    bytes, err := redis.Values(conn.Do("ZRANGE", rds.KeyPeriodicPolicy(ps.namespace), 0, -1))
    if err != nil {
        return err
    }

    count := 0
    for i, l := 0, len(bytes); i < l; i++ {
        rawPolicy := bytes[i].([]byte)
        p := &Policy{}

        if err := p.DeSerialize(rawPolicy); err != nil {
            // Ignore error which means the policy data is not valid
            // Only logged
            logger.Errorf("malform policy: %s; error: %s\n", rawPolicy, err)
            continue
        }

        // Add to cache store
        if err := ps.add(p); err != nil {
            // Only logged
            logger.Errorf("cache periodic policies error: %s", err)
            continue
        }

        count++

        logger.Debugf("Load periodic job policy: %s", string(rawPolicy))
    }

    logger.Infof("Load %d periodic job policies", count)

    return nil
}

// Add a policy to the store
func (ps *policyStore) add(item *Policy) error {
    if item == nil {
        return errors.New("nil policy to add")
    }

    if utils.IsEmptyStr(item.ID) {
        return errors.New("malform policy to add")
    }

    v, _ := ps.hash.LoadOrStore(item.ID, item)
    if v == nil {
        return fmt.Errorf("failed to add policy: %s", item.ID)
    }

    return nil
}

// Iterate all the policies in the store
func (ps *policyStore) Iterate(f func(id string, p *Policy) bool) {
    ps.hash.Range(func(k, v interface{}) bool {
        return f(k.(string), v.(*Policy))
    })
}

// Remove the specified policy from the store
func (ps *policyStore) remove(policyID string) *Policy {
    if utils.IsEmptyStr(policyID) {
        return nil
    }

    if v, ok := ps.hash.Load(policyID); ok {
        ps.hash.Delete(policyID)
        return v.(*Policy)
    }

    return nil
}
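For context on how serve() gets fed: a scheduler node broadcasts policy changes on the notification channel the store subscribes to. A hedged sketch of the publishing side, with conn and ns assumed to be set up elsewhere and the job name chosen for illustration; the message shape matches the struct above:

    m := &message{
        Event: changeEventSchedule,
        Data:  &Policy{ID: "fake_policy", JobName: "SAMPLE_JOB", CronSpec: "5 * * * * *"},
    }
    raw, err := json.Marshal(m)
    if err != nil {
        // handle marshal error
    }
    if _, err := conn.Do("PUBLISH", rds.KeyPeriodicNotification(ns), raw); err != nil {
        // handle publish error
    }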
153 src/jobservice/period/policy_store_test.go Normal file
@@ -0,0 +1,153 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package period

import (
    "context"
    "github.com/goharbor/harbor/src/jobservice/common/rds"
    "github.com/goharbor/harbor/src/jobservice/job"
    "github.com/goharbor/harbor/src/jobservice/tests"
    "github.com/gomodule/redigo/redis"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/suite"
    "testing"
    "time"
)

// PolicyStoreTestSuite tests functions of policy store
type PolicyStoreTestSuite struct {
    suite.Suite

    store     *policyStore
    namespace string
    pool      *redis.Pool
    cancel    context.CancelFunc
}

// TestPolicyStoreTestSuite is entry of go test
func TestPolicyStoreTestSuite(t *testing.T) {
    suite.Run(t, new(PolicyStoreTestSuite))
}

// SetupSuite prepares test suite
func (suite *PolicyStoreTestSuite) SetupSuite() {
    suite.namespace = tests.GiveMeTestNamespace()
    suite.pool = tests.GiveMeRedisPool()
    ctx, cancel := context.WithCancel(context.Background())
    suite.cancel = cancel

    suite.store = newPolicyStore(ctx, suite.namespace, suite.pool)
}

// TearDownSuite clears the test suite
func (suite *PolicyStoreTestSuite) TearDownSuite() {
    suite.cancel()

    conn := suite.pool.Get()
    defer func() {
        _ = conn.Close()
    }()

    _ = tests.ClearAll(suite.namespace, conn)
}

// TestServe tests policy store serve
func (suite *PolicyStoreTestSuite) TestServe() {
    var err error

    defer func() {
        suite.store.stopChan <- true
        assert.Nil(suite.T(), err, "serve exit: nil error expected but got %s", err)
    }()

    go func() {
        err = suite.store.serve()
    }()
    <-time.After(1 * time.Second)
}

// TestLoad tests load policy from backend
func (suite *PolicyStoreTestSuite) TestLoad() {
    // Prepare one
    p := &Policy{
        ID:       "fake_policy",
        JobName:  job.SampleJob,
        CronSpec: "5 * * * * *",
    }
    rawData, err := p.Serialize()
    assert.Nil(suite.T(), err, "prepare data: nil error expected but got %s", err)
    key := rds.KeyPeriodicPolicy(suite.namespace)

    conn := suite.pool.Get()
    defer func() {
        _ = conn.Close()
    }()

    _, err = conn.Do("ZADD", key, time.Now().Unix(), rawData)
    assert.Nil(suite.T(), err, "add data: nil error expected but got %s", err)

    err = suite.store.load()
    assert.Nil(suite.T(), err, "load: nil error expected but got %s", err)

    p1 := &Policy{
        ID:       "fake_policy_1",
        JobName:  job.SampleJob,
        CronSpec: "5 * * * * *",
    }
    m := &message{
        Event: changeEventSchedule,
        Data:  p1,
    }
    err = suite.store.sync(m)
    assert.Nil(suite.T(), err, "sync schedule: nil error expected but got %s", err)

    count := 0
    suite.store.Iterate(func(id string, p *Policy) bool {
        count++
        return true
    })
    assert.Equal(suite.T(), 2, count, "expected 2 policies but got %d", count)

    m1 := &message{
        Event: changeEventUnSchedule,
        Data:  p1,
    }
    err = suite.store.sync(m1)
    assert.Nil(suite.T(), err, "sync unschedule: nil error expected but got %s", err)

    count = 0
    suite.store.Iterate(func(id string, p *Policy) bool {
        count++
        return true
    })
    assert.Equal(suite.T(), 1, count, "expected 1 policies but got %d", count)
}

// TestPolicy tests policy itself
func (suite *PolicyStoreTestSuite) TestPolicy() {
    p1 := &Policy{
        ID:       "fake_policy_1",
        JobName:  job.SampleJob,
        CronSpec: "5 * * * * *",
    }

    bytes, err := p1.Serialize()
    assert.Nil(suite.T(), err, "policy serialize: nil error expected but got %s", err)
    p2 := &Policy{}
    err = p2.DeSerialize(bytes)
    assert.Nil(suite.T(), err, "policy deserialize: nil error expected but got %s", err)
    assert.Equal(suite.T(), "5 * * * * *", p2.CronSpec)
    err = p2.Validate()
    assert.Nil(suite.T(), err, "policy validate: nil error expected but got %s", err)
}
@ -1,349 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package period
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/goharbor/harbor/src/jobservice/errs"
|
||||
"github.com/goharbor/harbor/src/jobservice/opm"
|
||||
|
||||
"github.com/robfig/cron"
|
||||
|
||||
"github.com/goharbor/harbor/src/jobservice/env"
|
||||
"github.com/goharbor/harbor/src/jobservice/logger"
|
||||
"github.com/goharbor/harbor/src/jobservice/models"
|
||||
"github.com/goharbor/harbor/src/jobservice/utils"
|
||||
"github.com/gomodule/redigo/redis"
|
||||
)
|
||||
|
||||
const (
|
||||
// EventSchedulePeriodicPolicy is for scheduling periodic policy event
|
||||
EventSchedulePeriodicPolicy = "schedule"
|
||||
// EventUnSchedulePeriodicPolicy is for unscheduling periodic policy event
|
||||
EventUnSchedulePeriodicPolicy = "unschedule"
|
||||
)
|
||||
|
||||
// RedisPeriodicScheduler manages the periodic scheduling policies.
|
||||
type RedisPeriodicScheduler struct {
|
||||
context *env.Context
|
||||
redisPool *redis.Pool
|
||||
namespace string
|
||||
pstore *periodicJobPolicyStore
|
||||
enqueuer *periodicEnqueuer
|
||||
}
|
||||
|
||||
// NewRedisPeriodicScheduler is constructor of RedisPeriodicScheduler
|
||||
func NewRedisPeriodicScheduler(ctx *env.Context, namespace string, redisPool *redis.Pool, statsManager opm.JobStatsManager) *RedisPeriodicScheduler {
|
||||
pstore := &periodicJobPolicyStore{
|
||||
lock: new(sync.RWMutex),
|
||||
policies: make(map[string]*PeriodicJobPolicy),
|
||||
}
|
||||
enqueuer := newPeriodicEnqueuer(namespace, redisPool, pstore, statsManager)
|
||||
|
||||
return &RedisPeriodicScheduler{
|
||||
context: ctx,
|
||||
redisPool: redisPool,
|
||||
namespace: namespace,
|
||||
pstore: pstore,
|
||||
enqueuer: enqueuer,
|
||||
}
|
||||
}
|
||||
|
||||
// Start to serve
|
||||
func (rps *RedisPeriodicScheduler) Start() {
|
||||
defer func() {
|
||||
logger.Info("Redis scheduler is stopped")
|
||||
}()
|
||||
|
||||
// Load existing periodic job policies
|
||||
if err := rps.Load(); err != nil {
|
||||
// exit now
|
||||
rps.context.ErrorChan <- err
|
||||
return
|
||||
}
|
||||
|
||||
// start enqueuer
|
||||
rps.enqueuer.start()
|
||||
defer rps.enqueuer.stop()
|
||||
logger.Info("Redis scheduler is started")
|
||||
|
||||
// blocking here
|
||||
<-rps.context.SystemContext.Done()
|
||||
}
|
||||
|
||||
// Schedule is implementation of the same method in period.Interface
|
||||
func (rps *RedisPeriodicScheduler) Schedule(jobName string, params models.Parameters, cronSpec string) (string, int64, error) {
|
||||
if utils.IsEmptyStr(jobName) {
|
||||
return "", 0, errors.New("empty job name is not allowed")
|
||||
}
|
||||
if utils.IsEmptyStr(cronSpec) {
|
||||
return "", 0, errors.New("cron spec is not set")
|
||||
}
|
||||
|
||||
// Get next run time
|
||||
schedule, err := cron.Parse(cronSpec)
|
||||
if err != nil {
|
||||
return "", 0, err
|
||||
}
|
||||
|
||||
// Although the ZSET can guarantee no duplicated items, we still need to check the existing
|
||||
// of the job policy to avoid publish duplicated ones to other nodes as we
|
||||
// use transaction commands.
|
||||
jobPolicy := &PeriodicJobPolicy{
|
||||
JobName: jobName,
|
||||
JobParameters: params,
|
||||
CronSpec: cronSpec,
|
||||
}
|
||||
// Serialize data
|
||||
rawJSON, err := jobPolicy.Serialize()
|
||||
if err != nil {
|
||||
return "", 0, nil
|
||||
}
|
||||
|
||||
// Check existing
|
||||
// If existing, treat as a succeed submitting and return the exitsing id
|
||||
if score, ok := rps.exists(string(rawJSON)); ok {
|
||||
// Ignore error
|
||||
id, _ := rps.getIDByScore(score)
|
||||
return "", 0, errs.ConflictError(id)
|
||||
}
|
||||
|
||||
uuid, score := utils.MakePeriodicPolicyUUID()
|
||||
// Set back policy ID
|
||||
jobPolicy.PolicyID = uuid
|
||||
notification := &models.Message{
|
||||
Event: EventSchedulePeriodicPolicy,
|
||||
Data: jobPolicy,
|
||||
}
|
||||
rawJSON2, err := json.Marshal(notification)
|
||||
if err != nil {
|
||||
return "", 0, err
|
||||
}
|
||||
|
||||
// Save to redis db and publish notification via redis transaction
|
||||
conn := rps.redisPool.Get()
|
||||
defer conn.Close()
|
||||
|
||||
err = conn.Send("MULTI")
|
||||
if err != nil {
|
||||
return "", 0, err
|
||||
}
|
||||
err = conn.Send("ZADD", utils.KeyPeriodicPolicy(rps.namespace), score, rawJSON)
|
||||
if err != nil {
|
||||
return "", 0, err
|
||||
}
|
||||
err = conn.Send("ZADD", utils.KeyPeriodicPolicyScore(rps.namespace), score, uuid)
|
||||
if err != nil {
|
||||
return "", 0, err
|
||||
}
|
||||
err = conn.Send("PUBLISH", utils.KeyPeriodicNotification(rps.namespace), rawJSON2)
|
||||
if err != nil {
|
||||
return "", 0, err
|
||||
}
|
||||
|
||||
if _, err := conn.Do("EXEC"); err != nil {
|
||||
return "", 0, err
|
||||
}
|
||||
|
||||
return uuid, schedule.Next(time.Now()).Unix(), nil
|
||||
}
|
||||
|
||||
// UnSchedule is implementation of the same method in period.Interface
|
||||
func (rps *RedisPeriodicScheduler) UnSchedule(cronJobPolicyID string) error {
|
||||
if utils.IsEmptyStr(cronJobPolicyID) {
|
||||
return errors.New("cron job policy ID is empty")
|
||||
}
|
||||
|
||||
score, err := rps.getScoreByID(cronJobPolicyID)
|
||||
if err == redis.ErrNil {
|
||||
return errs.NoObjectFoundError(err.Error())
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
notification := &models.Message{
|
||||
Event: EventUnSchedulePeriodicPolicy,
|
||||
Data: &PeriodicJobPolicy{
|
||||
PolicyID: cronJobPolicyID, // Only ID required
|
||||
},
|
||||
}
|
||||
|
||||
rawJSON, err := json.Marshal(notification)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// REM from redis db
|
||||
conn := rps.redisPool.Get()
|
||||
defer conn.Close()
|
||||
|
||||
err = conn.Send("MULTI")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = conn.Send("ZREMRANGEBYSCORE", utils.KeyPeriodicPolicy(rps.namespace), score, score) // Accurately remove the item with the specified score
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = conn.Send("ZREMRANGEBYSCORE", utils.KeyPeriodicPolicyScore(rps.namespace), score, score) // Remove key score mapping
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = conn.Send("PUBLISH", utils.KeyPeriodicNotification(rps.namespace), rawJSON)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = conn.Do("EXEC")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Load data from zset
|
||||
func (rps *RedisPeriodicScheduler) Load() error {
|
||||
conn := rps.redisPool.Get()
|
||||
defer conn.Close()
|
||||
|
||||
// Let's build key score mapping locally first
|
||||
bytes, err := redis.MultiBulk(conn.Do("ZRANGE", utils.KeyPeriodicPolicyScore(rps.namespace), 0, -1, "WITHSCORES"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
keyScoreMap := make(map[int64]string)
|
||||
for i, l := 0, len(bytes); i < l; i = i + 2 {
|
||||
pid := string(bytes[i].([]byte))
|
||||
rawScore := bytes[i+1].([]byte)
|
||||
score, err := strconv.ParseInt(string(rawScore), 10, 64)
|
||||
if err != nil {
|
||||
// Ignore
|
||||
continue
|
||||
}
|
||||
keyScoreMap[score] = pid
|
||||
}
|
||||
|
||||
bytes, err = redis.MultiBulk(conn.Do("ZRANGE", utils.KeyPeriodicPolicy(rps.namespace), 0, -1, "WITHSCORES"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
allPeriodicPolicies := make([]*PeriodicJobPolicy, 0, len(bytes)/2)
|
||||
for i, l := 0, len(bytes); i < l; i = i + 2 {
|
||||
rawPolicy := bytes[i].([]byte)
|
||||
rawScore := bytes[i+1].([]byte)
|
||||
policy := &PeriodicJobPolicy{}
|
||||
|
||||
if err := policy.DeSerialize(rawPolicy); err != nil {
|
||||
// Ignore the error, which means the policy data is not valid;
|
||||
// it is only logged
|
||||
logger.Warningf("failed to deserialize periodic policy with error:%s; raw data: %s\n", err, rawPolicy)
|
||||
continue
|
||||
}
|
||||
score, err := strconv.ParseInt(string(rawScore), 10, 64)
|
||||
if err != nil {
|
||||
// Ignore the error, which means the score data is not valid;
|
||||
// it is only logged
|
||||
logger.Warningf("failed to parse the score of the periodic policy with error:%s\n", err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Set back the policy ID
|
||||
if pid, ok := keyScoreMap[score]; ok {
|
||||
policy.PolicyID = pid
|
||||
} else {
|
||||
// Something is wrong; this should not happen
|
||||
// ignore here
|
||||
continue
|
||||
}
|
||||
|
||||
allPeriodicPolicies = append(allPeriodicPolicies, policy)
|
||||
|
||||
logger.Infof("Load periodic job policy %s for job %s: %s", policy.PolicyID, policy.JobName, policy.CronSpec)
|
||||
}
|
||||
|
||||
if len(allPeriodicPolicies) > 0 {
|
||||
rps.pstore.addAll(allPeriodicPolicies)
|
||||
}
|
||||
|
||||
logger.Infof("Load %d periodic job policies", len(allPeriodicPolicies))
|
||||
return nil
|
||||
}
|
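ZRANGE ... WITHSCORES returns a flat array that alternates member and score, which is why Load walks the reply two elements at a time. A hedged sketch of that parsing step in isolation; the key name is illustrative.

package main

import (
	"fmt"
	"strconv"

	"github.com/gomodule/redigo/redis"
)

// zsetToMap parses a WITHSCORES reply into a score-to-member map,
// walking the flat reply two elements at a time as Load does.
func zsetToMap(conn redis.Conn, key string) (map[int64]string, error) {
	reply, err := redis.MultiBulk(conn.Do("ZRANGE", key, 0, -1, "WITHSCORES"))
	if err != nil {
		return nil, err
	}

	out := make(map[int64]string)
	// The reply alternates member, score, member, score, ...
	for i := 0; i+1 < len(reply); i += 2 {
		member := string(reply[i].([]byte))
		score, err := strconv.ParseInt(string(reply[i+1].([]byte)), 10, 64)
		if err != nil {
			continue // skip malformed entries, mirroring Load
		}
		out[score] = member
	}
	return out, nil
}

func main() {
	conn, err := redis.Dial("tcp", "localhost:6379")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer conn.Close()
	fmt.Println(zsetToMap(conn, "demo:scores"))
}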
||||
|
||||
// Clear is implementation of the same method in period.Interface
|
||||
func (rps *RedisPeriodicScheduler) Clear() error {
|
||||
conn := rps.redisPool.Get()
|
||||
defer conn.Close()
|
||||
|
||||
_, err := conn.Do("ZREMRANGEBYRANK", utils.KeyPeriodicPolicy(rps.namespace), 0, -1)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// AcceptPeriodicPolicy is implementation of the same method in period.Interface
|
||||
func (rps *RedisPeriodicScheduler) AcceptPeriodicPolicy(policy *PeriodicJobPolicy) error {
|
||||
if policy == nil || utils.IsEmptyStr(policy.PolicyID) {
|
||||
return errors.New("nil periodic policy")
|
||||
}
|
||||
|
||||
rps.pstore.add(policy)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemovePeriodicPolicy is implementation of the same method in period.Interface
|
||||
func (rps *RedisPeriodicScheduler) RemovePeriodicPolicy(policyID string) *PeriodicJobPolicy {
|
||||
if utils.IsEmptyStr(policyID) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return rps.pstore.remove(policyID)
|
||||
}
|
||||
|
||||
func (rps *RedisPeriodicScheduler) exists(rawPolicy string) (int64, bool) {
|
||||
if utils.IsEmptyStr(rawPolicy) {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
conn := rps.redisPool.Get()
|
||||
defer conn.Close()
|
||||
|
||||
score, err := redis.Int64(conn.Do("ZSCORE", utils.KeyPeriodicPolicy(rps.namespace), rawPolicy))
return score, err == nil
|
||||
}
|
||||
|
||||
func (rps *RedisPeriodicScheduler) getScoreByID(id string) (int64, error) {
|
||||
conn := rps.redisPool.Get()
|
||||
defer conn.Close()
|
||||
|
||||
return redis.Int64(conn.Do("ZSCORE", utils.KeyPeriodicPolicyScore(rps.namespace), id))
|
||||
}
|
||||
|
||||
func (rps *RedisPeriodicScheduler) getIDByScore(score int64) (string, error) {
|
||||
conn := rps.redisPool.Get()
|
||||
defer conn.Close()
|
||||
|
||||
ids, err := redis.Strings(conn.Do("ZRANGEBYSCORE", utils.KeyPeriodicPolicyScore(rps.namespace), score, score))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if len(ids) == 0 {
return "", errors.New("no id found with the specified score")
}
return ids[0], nil
|
||||
}
|
@ -1,105 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
package period
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/goharbor/harbor/src/jobservice/opm"
|
||||
|
||||
"github.com/goharbor/harbor/src/jobservice/env"
|
||||
"github.com/goharbor/harbor/src/jobservice/tests"
|
||||
"github.com/goharbor/harbor/src/jobservice/utils"
|
||||
)
|
||||
|
||||
var redisPool = tests.GiveMeRedisPool()
|
||||
|
||||
func TestScheduler(t *testing.T) {
|
||||
statsManager := opm.NewRedisJobStatsManager(context.Background(), tests.GiveMeTestNamespace(), redisPool)
|
||||
statsManager.Start()
|
||||
defer statsManager.Shutdown()
|
||||
|
||||
scheduler := myPeriodicScheduler(statsManager)
|
||||
params := make(map[string]interface{})
|
||||
params["image"] = "testing:v1"
|
||||
id, runAt, err := scheduler.Schedule("fake_job", params, "5 * * * * *")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if time.Now().Unix() >= runAt {
|
||||
t.Fatal("the running at time of scheduled job should be after now, but seems not")
|
||||
}
|
||||
|
||||
if err := scheduler.Load(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if scheduler.pstore.size() != 1 {
|
||||
t.Fatalf("expect 1 item in pstore but got '%d'\n", scheduler.pstore.size())
|
||||
}
|
||||
|
||||
if err := scheduler.UnSchedule(id); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := scheduler.Clear(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := tests.Clear(utils.KeyPeriodicPolicy(tests.GiveMeTestNamespace()), redisPool.Get()); err != nil {
t.Fatal(err)
}
if err := tests.Clear(utils.KeyPeriodicPolicyScore(tests.GiveMeTestNamespace()), redisPool.Get()); err != nil {
t.Fatal(err)
}
if err := tests.Clear(utils.KeyPeriodicNotification(tests.GiveMeTestNamespace()), redisPool.Get()); err != nil {
t.Fatal(err)
}
|
||||
}
|
||||
|
||||
func TestPubFunc(t *testing.T) {
|
||||
statsManager := opm.NewRedisJobStatsManager(context.Background(), tests.GiveMeTestNamespace(), redisPool)
|
||||
statsManager.Start()
|
||||
defer statsManager.Shutdown()
|
||||
|
||||
scheduler := myPeriodicScheduler(statsManager)
|
||||
p := &PeriodicJobPolicy{
|
||||
PolicyID: "fake_ID",
|
||||
JobName: "fake_job",
|
||||
CronSpec: "5 * * * * *",
|
||||
}
|
||||
if err := scheduler.AcceptPeriodicPolicy(p); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if scheduler.pstore.size() != 1 {
|
||||
t.Fatalf("expect 1 item in pstore but got '%d' after accepting \n", scheduler.pstore.size())
|
||||
}
|
||||
if rmp := scheduler.RemovePeriodicPolicy("fake_ID"); rmp == nil {
|
||||
t.Fatal("expect none nil object returned after removing but got nil")
|
||||
}
|
||||
if scheduler.pstore.size() != 0 {
|
||||
t.Fatalf("expect 0 item in pstore but got '%d' \n", scheduler.pstore.size())
|
||||
}
|
||||
}
|
||||
|
||||
func myPeriodicScheduler(statsManager opm.JobStatsManager) *RedisPeriodicScheduler {
|
||||
sysCtx := context.Background()
|
||||
ctx := &env.Context{
|
||||
SystemContext: sysCtx,
|
||||
WG: new(sync.WaitGroup),
|
||||
ErrorChan: make(chan error, 1),
|
||||
}
|
||||
|
||||
return NewRedisPeriodicScheduler(ctx, tests.GiveMeTestNamespace(), redisPool, statsManager)
|
||||
}
|
47
src/jobservice/period/scheduler.go
Normal file
@ -0,0 +1,47 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package period
|
||||
|
||||
// Scheduler defines operations the periodic scheduler should have.
|
||||
type Scheduler interface {
|
||||
// Start to serve periodic job scheduling process
|
||||
//
|
||||
// Returns:
|
||||
// error if any problems happened
|
||||
Start() error
|
||||
|
||||
// Stop the working periodic job scheduling process
|
||||
//
|
||||
// Returns:
|
||||
// error if any problems happened
|
||||
Stop() error
|
||||
|
||||
// Schedule the specified cron job policy.
|
||||
//
|
||||
// policy *Policy : The job template of the scheduled periodic jobs
|
||||
//
|
||||
// Returns:
|
||||
// int64 the numeric id of policy
|
||||
// error if failed to schedule
|
||||
Schedule(policy *Policy) (int64, error)
|
||||
|
||||
// Unschedule the specified cron job policy.
|
||||
//
|
||||
// policyID string: The ID of cron job policy.
|
||||
//
|
||||
// Return:
|
||||
// error if failed to unschedule
|
||||
UnSchedule(policyID string) error
|
||||
}
|
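A hedged sketch of how a caller might drive this new interface; the Policy fields and the scheduler wiring are assumptions for illustration, not the actual implementation.

package schedulerdemo

import "fmt"

// Policy stands in for period.Policy; its fields are assumed for illustration.
type Policy struct {
	JobName  string
	CronSpec string
}

// Scheduler mirrors the interface declared above.
type Scheduler interface {
	Start() error
	Stop() error
	Schedule(policy *Policy) (int64, error)
	UnSchedule(policyID string) error
}

// scheduleAndRevert drives a full schedule/unschedule round trip.
func scheduleAndRevert(s Scheduler, p *Policy) error {
	if err := s.Start(); err != nil {
		return err
	}
	defer s.Stop()

	numericID, err := s.Schedule(p)
	if err != nil {
		return err
	}

	// Schedule hands back a numeric ID while UnSchedule takes a string,
	// so callers convert between the two representations.
	return s.UnSchedule(fmt.Sprintf("%d", numericID))
}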
@ -1,109 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package period
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/gocraft/work"
|
||||
|
||||
"github.com/goharbor/harbor/src/jobservice/logger"
|
||||
"github.com/goharbor/harbor/src/jobservice/utils"
|
||||
"github.com/gomodule/redigo/redis"
|
||||
)
|
||||
|
||||
// Sweeper takes charge of clearing outdated data such as scheduled jobs, etc.
|
||||
// Currently, only used in redis worker pool.
|
||||
type Sweeper struct {
|
||||
redisPool *redis.Pool
|
||||
client *work.Client
|
||||
namespace string
|
||||
}
|
||||
|
||||
// NewSweeper is constructor of Sweeper.
|
||||
func NewSweeper(namespace string, pool *redis.Pool, client *work.Client) *Sweeper {
|
||||
return &Sweeper{
|
||||
namespace: namespace,
|
||||
redisPool: pool,
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
// ClearOutdatedScheduledJobs clears the outdated scheduled jobs.
|
||||
// Best-effort operation
|
||||
func (s *Sweeper) ClearOutdatedScheduledJobs() error {
|
||||
// Check if another worker pool has already done the action
|
||||
conn := s.redisPool.Get()
|
||||
defer conn.Close()
|
||||
|
||||
// Lock
|
||||
r, err := conn.Do("SET", utils.KeyPeriodicLock(s.namespace), time.Now().Unix(), "EX", 30, "NX")
|
||||
defer func() {
|
||||
// Make sure it can be unlocked if it is not expired yet
|
||||
if _, err := conn.Do("DEL", utils.KeyPeriodicLock(s.namespace)); err != nil {
|
||||
logger.Errorf("Unlock key '%s' failed with error: %s\n", utils.KeyPeriodicLock(s.namespace), err.Error())
|
||||
}
|
||||
}()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if r == nil {
|
||||
// Action is already locked by another worker pool
|
||||
logger.Info("Ignore clear outdated scheduled jobs")
|
||||
return nil
|
||||
}
|
||||
|
||||
nowEpoch := time.Now().Unix()
|
||||
jobScores, err := utils.GetZsetByScore(s.redisPool, utils.RedisKeyScheduled(s.namespace), []int64{0, nowEpoch})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
allErrors := make([]error, 0)
|
||||
for _, jobScore := range jobScores {
|
||||
j, err := utils.DeSerializeJob(jobScore.JobBytes)
|
||||
if err != nil {
|
||||
allErrors = append(allErrors, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if err = s.client.DeleteScheduledJob(jobScore.Score, j.ID); err != nil {
|
||||
allErrors = append(allErrors, err)
|
||||
}
|
||||
|
||||
logger.Infof("Clear outdated scheduled job: %s run at %#v\n", j.ID, time.Unix(jobScore.Score, 0).String())
|
||||
}
|
||||
|
||||
// Aggregate the collected errors
|
||||
if len(allErrors) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(allErrors) == 1 {
|
||||
return allErrors[0]
|
||||
}
|
||||
|
||||
errorSummary := allErrors[0].Error()
|
||||
for index, e := range allErrors {
|
||||
if index == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
errorSummary = fmt.Sprintf("%s, %s", errorSummary, e)
|
||||
}
|
||||
return fmt.Errorf("%s", errorSummary)
|
||||
}
|
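ClearOutdatedScheduledJobs relies on Redis's SET ... EX ... NX as a best-effort distributed lock. A self-contained sketch of that locking idiom follows; the lock key and TTL are placeholders.

package main

import (
	"fmt"
	"time"

	"github.com/gomodule/redigo/redis"
)

// withLock runs fn only when the lock key can be acquired.
func withLock(conn redis.Conn, key string, ttlSeconds int, fn func() error) error {
	// NX: set only if the key does not exist; EX: expire after the TTL.
	reply, err := conn.Do("SET", key, time.Now().Unix(), "EX", ttlSeconds, "NX")
	if err != nil {
		return err
	}
	if reply == nil {
		// Another worker pool holds the lock; skip quietly, as the sweeper does.
		return nil
	}
	// Release only after a successful acquire.
	defer conn.Do("DEL", key)
	return fn()
}

func main() {
	conn, err := redis.Dial("tcp", "localhost:6379")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer conn.Close()
	_ = withLock(conn, "demo:lock", 30, func() error {
		fmt.Println("clearing outdated jobs")
		return nil
	})
}

One subtlety worth noting: the deferred DEL in the sweeper above runs even when the lock was never acquired, which can release a lock held by another pool; the sketch deletes the key only after a successful acquire.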
@ -1,60 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
package period
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/gocraft/work"
|
||||
|
||||
"github.com/goharbor/harbor/src/jobservice/tests"
|
||||
"github.com/goharbor/harbor/src/jobservice/utils"
|
||||
)
|
||||
|
||||
func TestSweeper(t *testing.T) {
|
||||
epoch := time.Now().Unix() - 1000
|
||||
if err := createFakeScheduledJob(epoch); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ns := tests.GiveMeTestNamespace()
|
||||
sweeper := NewSweeper(ns, redisPool, work.NewClient(ns, redisPool))
|
||||
if err := sweeper.ClearOutdatedScheduledJobs(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err := tests.Clear(utils.RedisKeyScheduled(ns), redisPool.Get())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func createFakeScheduledJob(runAt int64) error {
|
||||
fakeJob := make(map[string]interface{})
|
||||
fakeJob["name"] = "fake_periodic_job"
|
||||
fakeJob["id"] = "fake_job_id"
|
||||
fakeJob["t"] = runAt
|
||||
fakeJob["args"] = make(map[string]interface{})
|
||||
|
||||
rawJSON, err := json.Marshal(&fakeJob)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
conn := redisPool.Get()
|
||||
defer conn.Close()
|
||||
|
||||
_, err = conn.Do("ZADD", utils.RedisKeyScheduled(tests.GiveMeTestNamespace()), runAt, rawJSON)
|
||||
return err
|
||||
}
|
@ -1,28 +0,0 @@
|
||||
package pool
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/goharbor/harbor/src/jobservice/tests"
|
||||
)
|
||||
|
||||
func TestDeDuplicator(t *testing.T) {
|
||||
jobName := "fake_job"
|
||||
jobParams := map[string]interface{}{
|
||||
"image": "ubuntu:latest",
|
||||
}
|
||||
|
||||
rdd := NewRedisDeDuplicator(tests.GiveMeTestNamespace(), rPool)
|
||||
|
||||
if err := rdd.Unique(jobName, jobParams); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if err := rdd.Unique(jobName, jobParams); err == nil {
|
||||
t.Errorf("expect duplicated error but got nil error")
|
||||
}
|
||||
|
||||
if err := rdd.DelUniqueSign(jobName, jobParams); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
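The DeDuplicator implementation itself is not part of this hunk. One plausible way to derive the unique sign it stores is to hash the canonicalized job parameters; the sketch below only illustrates the idea and is not Harbor's actual key scheme.

package main

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"
)

// uniqueSign derives a stable key for a job name plus its parameters.
func uniqueSign(jobName string, params map[string]interface{}) (string, error) {
	// json.Marshal emits map keys in sorted order, giving a canonical form.
	raw, err := json.Marshal(params)
	if err != nil {
		return "", err
	}
	sum := sha256.Sum256(raw)
	return fmt.Sprintf("unique:%s:%x", jobName, sum), nil
}

func main() {
	sign, _ := uniqueSign("fake_job", map[string]interface{}{"image": "ubuntu:latest"})
	fmt.Println(sign)
}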
@ -1,146 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pool
|
||||
|
||||
import "github.com/goharbor/harbor/src/jobservice/models"
|
||||
|
||||
// Interface for worker pool.
|
||||
// More like a driver that abstracts the underlying queue.
|
||||
type Interface interface {
|
||||
// Start to serve
|
||||
//
|
||||
// Return:
|
||||
// error if failed to start
|
||||
Start() error
|
||||
|
||||
// Register job to the pool.
|
||||
//
|
||||
// name string : the name used to refer to the job
|
||||
// job interface{}: job handler which must implement the job.Interface.
|
||||
//
|
||||
// Return:
|
||||
// error if failed to register
|
||||
RegisterJob(name string, job interface{}) error
|
||||
|
||||
// Register multiple jobs.
|
||||
//
|
||||
// jobs map[string]interface{}: job map, key is job name and value is job handler.
|
||||
//
|
||||
// Return:
|
||||
// error if failed to register
|
||||
RegisterJobs(jobs map[string]interface{}) error
|
||||
|
||||
// Enqueue job
|
||||
//
|
||||
// jobName string : the name of enqueuing job
|
||||
// params models.Parameters : parameters of enqueuing job
|
||||
// isUnique bool : specifies whether a duplicated job will be discarded
|
||||
//
|
||||
// Returns:
|
||||
// models.JobStats: the stats of the enqueued job on success
|
||||
// error : if failed to enqueue
|
||||
Enqueue(jobName string, params models.Parameters, isUnique bool) (models.JobStats, error)
|
||||
|
||||
// Schedule job to run after the specified interval (seconds).
|
||||
//
|
||||
// jobName string : the name of enqueuing job
|
||||
// runAfterSeconds uint64 : the waiting interval with seconds
|
||||
// params models.Parameters : parameters of enqueuing job
|
||||
// isUnique bool : specifies whether a duplicated job will be discarded
|
||||
//
|
||||
// Returns:
|
||||
// models.JobStats: the stats of the enqueued job on success
|
||||
// error : if failed to enqueue
|
||||
Schedule(jobName string, params models.Parameters, runAfterSeconds uint64, isUnique bool) (models.JobStats, error)
|
||||
|
||||
// Schedule the job periodically running.
|
||||
//
|
||||
// jobName string : the name of enqueuing job
|
||||
// params models.Parameters : parameters of enqueuing job
|
||||
// cronSetting string : the periodic duration with cron style like '0 * * * * *'
|
||||
//
|
||||
// Returns:
|
||||
// models.JobStats: the stats of the enqueued job on success
|
||||
// error : if failed to enqueue
|
||||
PeriodicallyEnqueue(jobName string, params models.Parameters, cronSetting string) (models.JobStats, error)
|
||||
|
||||
// Return the status info of the pool.
|
||||
//
|
||||
// Returns:
|
||||
// models.JobPoolStats : the stats info of all running pools
|
||||
// error : failed to check
|
||||
Stats() (models.JobPoolStats, error)
|
||||
|
||||
// Check if the job has been already registered.
|
||||
//
|
||||
// name string : name of job
|
||||
//
|
||||
// Returns:
|
||||
// interface{} : the job type of the known job if it exists
|
||||
// bool : whether the job is known (registered)
|
||||
IsKnownJob(name string) (interface{}, bool)
|
||||
|
||||
// Validate the parameters of the known job
|
||||
//
|
||||
// jobType interface{} : type of known job
|
||||
// params map[string]interface{} : parameters of known job
|
||||
//
|
||||
// Return:
|
||||
// error if parameters are not valid
|
||||
|
||||
ValidateJobParameters(jobType interface{}, params map[string]interface{}) error
|
||||
|
||||
// Get the stats of the specified job
|
||||
//
|
||||
// jobID string : ID of the enqueued job
|
||||
//
|
||||
// Returns:
|
||||
// models.JobStats : job stats data
|
||||
// error : error returned if meet any problems
|
||||
GetJobStats(jobID string) (models.JobStats, error)
|
||||
|
||||
// Stop the job
|
||||
//
|
||||
// jobID string : ID of the enqueued job
|
||||
//
|
||||
// Return:
|
||||
// error : error returned if any problems occur
|
||||
StopJob(jobID string) error
|
||||
|
||||
// Cancel the job
|
||||
//
|
||||
// jobID string : ID of the enqueued job
|
||||
//
|
||||
// Return:
|
||||
// error : error returned if any problems occur
|
||||
CancelJob(jobID string) error
|
||||
|
||||
// Retry the job
|
||||
//
|
||||
// jobID string : ID of the enqueued job
|
||||
//
|
||||
// Return:
|
||||
// error : error returned if any problems occur
|
||||
RetryJob(jobID string) error
|
||||
|
||||
// Register hook
|
||||
//
|
||||
// jobID string : ID of job
|
||||
// hookURL string : the hook url
|
||||
//
|
||||
// Return:
|
||||
// error : error returned if any problems occur
|
||||
RegisterHook(jobID string, hookURL string) error
|
||||
}
|
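A hedged usage sketch of this interface; the job name and parameters are placeholders, and the narrowed local interface is declared only so the example stays self-contained.

package pooldemo

import (
	"fmt"

	"github.com/goharbor/harbor/src/jobservice/models"
)

// enqueuer is the subset of the pool interface exercised here.
type enqueuer interface {
	Enqueue(jobName string, params models.Parameters, isUnique bool) (models.JobStats, error)
	GetJobStats(jobID string) (models.JobStats, error)
}

// enqueueAndTrack enqueues a unique job and reads its stats back.
// "DEMO_JOB" and the parameters are placeholders.
func enqueueAndTrack(p enqueuer) error {
	stats, err := p.Enqueue("DEMO_JOB", models.Parameters{"image": "testing:v1"}, true)
	if err != nil {
		return err
	}
	fmt.Printf("job %s enqueued as %s\n", stats.Stats.JobID, stats.Stats.Status)

	// The stats are saved asynchronously on the backend, so a follow-up
	// GetJobStats may briefly lag behind the enqueue call.
	_, err = p.GetJobStats(stats.Stats.JobID)
	return err
}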
@ -1,203 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pool
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/goharbor/harbor/src/jobservice/logger"
|
||||
"github.com/goharbor/harbor/src/jobservice/opm"
|
||||
"github.com/goharbor/harbor/src/jobservice/period"
|
||||
|
||||
"github.com/goharbor/harbor/src/jobservice/models"
|
||||
"github.com/goharbor/harbor/src/jobservice/utils"
|
||||
"github.com/gomodule/redigo/redis"
|
||||
)
|
||||
|
||||
const (
|
||||
msgServerRetryTimes = 5
|
||||
)
|
||||
|
||||
// MessageServer implements the pub/sub mechanism via redis for asynchronous message exchange.
|
||||
type MessageServer struct {
|
||||
context context.Context
|
||||
redisPool *redis.Pool
|
||||
namespace string
|
||||
callbacks map[string]reflect.Value // no need to sync
|
||||
}
|
||||
|
||||
// NewMessageServer creates a new ptr of MessageServer
|
||||
func NewMessageServer(ctx context.Context, namespace string, redisPool *redis.Pool) *MessageServer {
|
||||
return &MessageServer{
|
||||
context: ctx,
|
||||
redisPool: redisPool,
|
||||
namespace: namespace,
|
||||
callbacks: make(map[string]reflect.Value),
|
||||
}
|
||||
}
|
||||
|
||||
// Start to serve
|
||||
func (ms *MessageServer) Start() error {
|
||||
defer func() {
|
||||
logger.Info("Message server is stopped")
|
||||
}()
|
||||
|
||||
conn := ms.redisPool.Get() // Get one backend connection!
|
||||
psc := redis.PubSubConn{
|
||||
Conn: conn,
|
||||
}
|
||||
defer psc.Close()
|
||||
|
||||
// Subscribe channel
|
||||
err := psc.Subscribe(redis.Args{}.AddFlat(utils.KeyPeriodicNotification(ms.namespace))...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
done := make(chan error, 1)
|
||||
go func() {
|
||||
for {
|
||||
switch res := psc.Receive().(type) {
|
||||
case error:
|
||||
done <- fmt.Errorf("error occurred when receiving from pub/sub channel of message server: %s", res.Error())
|
||||
case redis.Message:
|
||||
m := &models.Message{}
|
||||
if err := json.Unmarshal(res.Data, m); err != nil {
|
||||
// logged
|
||||
logger.Warningf("Read invalid message: %s\n", res.Data)
|
||||
}
|
||||
if callback, ok := ms.callbacks[m.Event]; !ok {
|
||||
// logged
|
||||
logger.Warningf("no handler to handle event %s\n", m.Event)
|
||||
} else {
|
||||
// logged incoming events
|
||||
logger.Infof("Receive event '%s' with data(unformatted): %+#v\n", m.Event, m.Data)
|
||||
// Try to recover the concrete type
|
||||
var converted interface{}
|
||||
switch m.Event {
|
||||
case period.EventSchedulePeriodicPolicy,
|
||||
period.EventUnSchedulePeriodicPolicy:
|
||||
// Ignore the error; it should not happen because we did not change the data
|
||||
// after the last unmarshal attempt.
|
||||
policyObject := &period.PeriodicJobPolicy{}
|
||||
dt, _ := json.Marshal(m.Data)
|
||||
json.Unmarshal(dt, policyObject)
|
||||
converted = policyObject
|
||||
case opm.EventRegisterStatusHook:
|
||||
// ignore error
|
||||
hookObject := &opm.HookData{}
|
||||
dt, _ := json.Marshal(m.Data)
|
||||
json.Unmarshal(dt, hookObject)
|
||||
converted = hookObject
|
||||
case opm.EventFireCommand:
|
||||
// no need to convert []string
|
||||
converted = m.Data
|
||||
}
|
||||
res := callback.Call([]reflect.Value{reflect.ValueOf(converted)})
|
||||
e := res[0].Interface()
|
||||
if e != nil {
|
||||
err := e.(error)
|
||||
// logged
|
||||
logger.Errorf("Failed to fire callback with error: %s\n", err)
|
||||
}
|
||||
}
|
||||
case redis.Subscription:
|
||||
switch res.Kind {
|
||||
case "subscribe":
|
||||
logger.Infof("Subscribe redis channel %s\n", res.Channel)
|
||||
break
|
||||
case "unsubscribe":
|
||||
// Unsubscribe all, means main goroutine is exiting
|
||||
logger.Infof("Unsubscribe redis channel %s\n", res.Channel)
|
||||
done <- nil
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
logger.Info("Message server is started")
|
||||
|
||||
ticker := time.NewTicker(time.Minute)
|
||||
defer ticker.Stop()
|
||||
|
||||
// blocking here
|
||||
for err == nil {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
err = psc.Ping("ping!")
|
||||
case <-ms.context.Done():
|
||||
err = errors.New("context exit")
|
||||
case err = <-done:
|
||||
}
|
||||
}
|
||||
|
||||
// Unsubscribe all
|
||||
psc.Unsubscribe()
|
||||
|
||||
return <-done
|
||||
}
|
||||
|
||||
// Subscribe event with specified callback
|
||||
func (ms *MessageServer) Subscribe(event string, callback interface{}) error {
|
||||
if utils.IsEmptyStr(event) {
|
||||
return errors.New("empty event is not allowed")
|
||||
}
|
||||
|
||||
handler, err := validateCallbackFunc(callback)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ms.callbacks[event] = handler
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateCallbackFunc(callback interface{}) (reflect.Value, error) {
|
||||
if callback == nil {
|
||||
return reflect.ValueOf(nil), errors.New("nil callback handler")
|
||||
}
|
||||
|
||||
vFn := reflect.ValueOf(callback)
|
||||
vFType := vFn.Type()
|
||||
if vFType.Kind() != reflect.Func {
|
||||
return reflect.ValueOf(nil), errors.New("callback handler must be a generic func")
|
||||
}
|
||||
|
||||
inNum := vFType.NumIn()
|
||||
outNum := vFType.NumOut()
|
||||
if inNum != 1 || outNum != 1 {
|
||||
return reflect.ValueOf(nil), errors.New("callback handler can only be func(interface{})error format")
|
||||
}
|
||||
|
||||
inType := vFType.In(0)
|
||||
var intf *interface{}
|
||||
if inType != reflect.TypeOf(intf).Elem() {
|
||||
return reflect.ValueOf(nil), errors.New("callback handler can only be func(interface{})error format")
|
||||
}
|
||||
|
||||
outType := vFType.Out(0)
|
||||
var e *error
|
||||
if outType != reflect.TypeOf(e).Elem() {
|
||||
return reflect.ValueOf(nil), errors.New("callback handler can only be func(interface{})error format")
|
||||
}
|
||||
|
||||
return vFn, nil
|
||||
}
|
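validateCallbackFunc above only accepts handlers with the exact shape func(interface{}) error, so a subscription looks like the following sketch. It targets the pool package as it existed before this change; the namespace and event name are placeholders.

package main

import (
	"context"
	"fmt"

	"github.com/goharbor/harbor/src/jobservice/pool"
	"github.com/gomodule/redigo/redis"
)

func main() {
	rp := &redis.Pool{Dial: func() (redis.Conn, error) {
		return redis.Dial("tcp", "localhost:6379")
	}}
	ms := pool.NewMessageServer(context.Background(), "demo_ns", rp)

	// Handlers must have exactly the shape func(interface{}) error;
	// anything else is rejected by validateCallbackFunc.
	if err := ms.Subscribe("demo_event", func(data interface{}) error {
		fmt.Printf("received: %v\n", data)
		return nil
	}); err != nil {
		fmt.Println(err)
		return
	}

	// Start blocks until the context is cancelled or the subscription drops.
	_ = ms.Start()
}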
@ -1,211 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
package pool
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/goharbor/harbor/src/jobservice/opm"
|
||||
|
||||
"github.com/goharbor/harbor/src/jobservice/models"
|
||||
"github.com/goharbor/harbor/src/jobservice/period"
|
||||
"github.com/goharbor/harbor/src/jobservice/utils"
|
||||
|
||||
"github.com/goharbor/harbor/src/jobservice/tests"
|
||||
)
|
||||
|
||||
var redisPool = tests.GiveMeRedisPool()
|
||||
|
||||
func TestPublishPolicy(t *testing.T) {
|
||||
ms, cancel := createMessageServer()
|
||||
err := ms.Subscribe(period.EventSchedulePeriodicPolicy, func(data interface{}) error {
|
||||
if _, ok := data.(*period.PeriodicJobPolicy); !ok {
|
||||
t.Fatal("expect PeriodicJobPolicy but got other thing")
|
||||
return errors.New("expect PeriodicJobPolicy but got other thing")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = ms.Subscribe(period.EventUnSchedulePeriodicPolicy, func(data interface{}) error {
|
||||
if _, ok := data.(*period.PeriodicJobPolicy); !ok {
|
||||
t.Fatal("expect PeriodicJobPolicy but got other thing")
|
||||
return errors.New("expect PeriodicJobPolicy but got other thing")
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer cancel()
|
||||
// wait and then publish
|
||||
<-time.After(200 * time.Millisecond)
|
||||
|
||||
p := &period.PeriodicJobPolicy{
|
||||
PolicyID: "fake_ID",
|
||||
JobName: "fake_job",
|
||||
CronSpec: "5 * * * *",
|
||||
}
|
||||
notification := &models.Message{
|
||||
Event: period.EventSchedulePeriodicPolicy,
|
||||
Data: p,
|
||||
}
|
||||
|
||||
rawJSON, err := json.Marshal(notification)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
conn := redisPool.Get()
|
||||
defer conn.Close()
|
||||
err = conn.Send("PUBLISH", utils.KeyPeriodicNotification(tests.GiveMeTestNamespace()), rawJSON)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
notification.Event = period.EventUnSchedulePeriodicPolicy
|
||||
rawJSON, err = json.Marshal(notification)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = conn.Send("PUBLISH", utils.KeyPeriodicNotification(tests.GiveMeTestNamespace()), rawJSON)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// send quit signal
|
||||
<-time.After(200 * time.Millisecond)
|
||||
err = tests.Clear(utils.KeyPeriodicNotification(tests.GiveMeTestNamespace()), conn)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}()
|
||||
|
||||
ms.Start()
|
||||
}
|
||||
|
||||
func TestPublishHook(t *testing.T) {
|
||||
ms, cancel := createMessageServer()
|
||||
err := ms.Subscribe(opm.EventRegisterStatusHook, func(data interface{}) error {
|
||||
if _, ok := data.(*opm.HookData); !ok {
|
||||
t.Fatal("expect HookData but got other thing")
|
||||
return errors.New("expect HookData but got other thing")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer cancel()
|
||||
|
||||
<-time.After(200 * time.Millisecond)
|
||||
hook := &opm.HookData{
|
||||
JobID: "fake_job_ID",
|
||||
HookURL: "http://localhost:9999/hook",
|
||||
}
|
||||
notification := &models.Message{
|
||||
Event: opm.EventRegisterStatusHook,
|
||||
Data: hook,
|
||||
}
|
||||
|
||||
rawJSON, err := json.Marshal(notification)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
conn := redisPool.Get()
|
||||
defer conn.Close()
|
||||
err = conn.Send("PUBLISH", utils.KeyPeriodicNotification(tests.GiveMeTestNamespace()), rawJSON)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// send quit signal
|
||||
<-time.After(200 * time.Millisecond)
|
||||
err = tests.Clear(utils.KeyPeriodicNotification(tests.GiveMeTestNamespace()), conn)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}()
|
||||
|
||||
ms.Start()
|
||||
}
|
||||
|
||||
func TestPublishCommands(t *testing.T) {
|
||||
ms, cancel := createMessageServer()
|
||||
err := ms.Subscribe(opm.EventFireCommand, func(data interface{}) error {
|
||||
cmds, ok := data.([]string)
|
||||
if !ok {
|
||||
t.Fatal("expect fired command but got other thing")
|
||||
return errors.New("expect fired command but got other thing")
|
||||
}
|
||||
if len(cmds) != 2 {
|
||||
t.Fatalf("expect a array with 2 items but only got '%d' items", len(cmds))
|
||||
return fmt.Errorf("expect a array with 2 items but only got '%d' items", len(cmds))
|
||||
}
|
||||
if cmds[1] != "stop" {
|
||||
t.Fatalf("expect command 'stop' but got '%s'", cmds[1])
|
||||
return fmt.Errorf("expect command 'stop' but got '%s'", cmds[1])
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer cancel()
|
||||
<-time.After(200 * time.Millisecond)
|
||||
|
||||
notification := &models.Message{
|
||||
Event: opm.EventRegisterStatusHook,
|
||||
Data: []string{"fake_job_ID", "stop"},
|
||||
}
|
||||
|
||||
rawJSON, err := json.Marshal(notification)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
conn := redisPool.Get()
|
||||
defer conn.Close()
|
||||
err = conn.Send("PUBLISH", utils.KeyPeriodicNotification(tests.GiveMeTestNamespace()), rawJSON)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// hold for a while
|
||||
<-time.After(200 * time.Millisecond)
|
||||
}()
|
||||
|
||||
ms.Start()
|
||||
}
|
||||
|
||||
func createMessageServer() (*MessageServer, context.CancelFunc) {
|
||||
ns := tests.GiveMeTestNamespace()
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
return NewMessageServer(ctx, ns, redisPool), cancel
|
||||
}
|
@ -1,267 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pool
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/goharbor/harbor/src/jobservice/job/impl"
|
||||
|
||||
"github.com/gocraft/work"
|
||||
"github.com/goharbor/harbor/src/jobservice/env"
|
||||
"github.com/goharbor/harbor/src/jobservice/errs"
|
||||
"github.com/goharbor/harbor/src/jobservice/job"
|
||||
"github.com/goharbor/harbor/src/jobservice/logger"
|
||||
"github.com/goharbor/harbor/src/jobservice/models"
|
||||
"github.com/goharbor/harbor/src/jobservice/opm"
|
||||
"github.com/goharbor/harbor/src/jobservice/utils"
|
||||
)
|
||||
|
||||
// RedisJob is a wrapper that adapts a job.Interface implementation to the form recognized by the redis pool.
|
||||
type RedisJob struct {
|
||||
job interface{} // the real job implementation
|
||||
context *env.Context // context
|
||||
statsManager opm.JobStatsManager // job stats manager
|
||||
deDuplicator DeDuplicator // handle unique job
|
||||
}
|
||||
|
||||
// NewRedisJob is constructor of RedisJob
|
||||
func NewRedisJob(j interface{}, ctx *env.Context, statsManager opm.JobStatsManager, deDuplicator DeDuplicator) *RedisJob {
|
||||
return &RedisJob{
|
||||
job: j,
|
||||
context: ctx,
|
||||
statsManager: statsManager,
|
||||
deDuplicator: deDuplicator,
|
||||
}
|
||||
}
|
||||
|
||||
// Run the job
|
||||
func (rj *RedisJob) Run(j *work.Job) error {
|
||||
var (
|
||||
cancelled = false
|
||||
buildContextFailed = false
|
||||
runningJob job.Interface
|
||||
err error
|
||||
execContext env.JobContext
|
||||
)
|
||||
|
||||
defer func() {
|
||||
if err == nil {
|
||||
logger.Infof("Job '%s:%s' exit with success", j.Name, j.ID)
|
||||
return // nothing needs to be done
|
||||
}
|
||||
|
||||
// log error
|
||||
logger.Errorf("Job '%s:%s' exit with error: %s\n", j.Name, j.ID, err)
|
||||
|
||||
if buildContextFailed || rj.shouldDisableRetry(runningJob, j, cancelled) {
|
||||
j.Fails = 10000000000 // Make it big enough to avoid retrying
|
||||
now := time.Now().Unix()
|
||||
go func() {
|
||||
timer := time.NewTimer(2 * time.Second) // make sure the failed job is already put into the dead queue
|
||||
defer timer.Stop()
|
||||
|
||||
<-timer.C
|
||||
|
||||
rj.statsManager.DieAt(j.ID, now)
|
||||
}()
|
||||
}
|
||||
}()
|
||||
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
err = fmt.Errorf("Runtime error: %s", r)
|
||||
|
||||
// Log the stack
|
||||
buf := make([]byte, 1<<16)
|
||||
size := runtime.Stack(buf, false)
|
||||
logger.Errorf("Runtime error happened when executing job %s:%s: %s", j.Name, j.ID, buf[0:size])
|
||||
|
||||
// record runtime error status
|
||||
rj.jobFailed(j.ID)
|
||||
}
|
||||
}()
|
||||
|
||||
// Wrap job
|
||||
runningJob = Wrap(rj.job)
|
||||
|
||||
execContext, err = rj.buildContext(j)
|
||||
if err != nil {
|
||||
buildContextFailed = true
|
||||
goto FAILED // no need to retry
|
||||
}
|
||||
|
||||
defer func() {
|
||||
// Close open io stream first
|
||||
if closer, ok := execContext.GetLogger().(logger.Closer); ok {
|
||||
err := closer.Close()
|
||||
if err != nil {
|
||||
logger.Errorf("Close job logger failed: %s", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
if j.Unique {
|
||||
defer func() {
|
||||
if err := rj.deDuplicator.DelUniqueSign(j.Name, j.Args); err != nil {
|
||||
logger.Errorf("delete job unique sign error: %s", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Start to run
|
||||
rj.jobRunning(j.ID)
|
||||
|
||||
// Inject data
|
||||
err = runningJob.Run(execContext, j.Args)
|
||||
|
||||
// update the proper status
|
||||
if err == nil {
|
||||
rj.jobSucceed(j.ID)
|
||||
return nil
|
||||
}
|
||||
|
||||
if errs.IsJobStoppedError(err) {
|
||||
rj.jobStopped(j.ID)
|
||||
return nil // no need to put it into the dead queue for resume
|
||||
}
|
||||
|
||||
if errs.IsJobCancelledError(err) {
|
||||
rj.jobCancelled(j.ID)
|
||||
cancelled = true
|
||||
return err // need to resume
|
||||
}
|
||||
|
||||
FAILED:
|
||||
rj.jobFailed(j.ID)
|
||||
return err
|
||||
}
|
||||
|
||||
func (rj *RedisJob) jobRunning(jobID string) {
|
||||
rj.statsManager.SetJobStatus(jobID, job.JobStatusRunning)
|
||||
}
|
||||
|
||||
func (rj *RedisJob) jobFailed(jobID string) {
|
||||
rj.statsManager.SetJobStatus(jobID, job.JobStatusError)
|
||||
}
|
||||
|
||||
func (rj *RedisJob) jobStopped(jobID string) {
|
||||
rj.statsManager.SetJobStatus(jobID, job.JobStatusStopped)
|
||||
}
|
||||
|
||||
func (rj *RedisJob) jobCancelled(jobID string) {
|
||||
rj.statsManager.SetJobStatus(jobID, job.JobStatusCancelled)
|
||||
}
|
||||
|
||||
func (rj *RedisJob) jobSucceed(jobID string) {
|
||||
rj.statsManager.SetJobStatus(jobID, job.JobStatusSuccess)
|
||||
}
|
||||
|
||||
func (rj *RedisJob) buildContext(j *work.Job) (env.JobContext, error) {
|
||||
// Build job execution context
|
||||
jData := env.JobData{
|
||||
ID: j.ID,
|
||||
Name: j.Name,
|
||||
Args: j.Args,
|
||||
ExtraData: make(map[string]interface{}),
|
||||
}
|
||||
|
||||
checkOPCmdFuncFactory := func(jobID string) job.CheckOPCmdFunc {
|
||||
return func() (string, bool) {
|
||||
cmd, err := rj.statsManager.CtlCommand(jobID)
|
||||
if err != nil {
|
||||
return "", false
|
||||
}
|
||||
return cmd, true
|
||||
}
|
||||
}
|
||||
|
||||
jData.ExtraData["opCommandFunc"] = checkOPCmdFuncFactory(j.ID)
|
||||
|
||||
checkInFuncFactory := func(jobID string) job.CheckInFunc {
|
||||
return func(message string) {
|
||||
rj.statsManager.CheckIn(jobID, message)
|
||||
}
|
||||
}
|
||||
|
||||
jData.ExtraData["checkInFunc"] = checkInFuncFactory(j.ID)
|
||||
|
||||
launchJobFuncFactory := func(jobID string) job.LaunchJobFunc {
|
||||
funcIntf := rj.context.SystemContext.Value(utils.CtlKeyOfLaunchJobFunc)
|
||||
return func(jobReq models.JobRequest) (models.JobStats, error) {
|
||||
launchJobFunc, ok := funcIntf.(job.LaunchJobFunc)
|
||||
if !ok {
|
||||
return models.JobStats{}, errors.New("no launch job func provided")
|
||||
}
|
||||
|
||||
jobName := ""
|
||||
if jobReq.Job != nil {
|
||||
jobName = jobReq.Job.Name
|
||||
}
|
||||
if j.Name == jobName {
|
||||
return models.JobStats{}, errors.New("infinite job creating loop may exist")
|
||||
}
|
||||
|
||||
res, err := launchJobFunc(jobReq)
|
||||
if err != nil {
|
||||
return models.JobStats{}, err
|
||||
}
|
||||
|
||||
if err := rj.statsManager.Update(jobID, "multiple_executions", true); err != nil {
|
||||
logger.Error(err)
|
||||
}
|
||||
|
||||
if err := rj.statsManager.Update(res.Stats.JobID, "upstream_job_id", jobID); err != nil {
|
||||
logger.Error(err)
|
||||
}
|
||||
|
||||
rj.statsManager.AttachExecution(jobID, res.Stats.JobID)
|
||||
|
||||
logger.Infof("Launch sub job %s:%s for upstream job %s", res.Stats.JobName, res.Stats.JobID, jobID)
|
||||
return res, nil
|
||||
}
|
||||
}
|
||||
|
||||
jData.ExtraData["launchJobFunc"] = launchJobFuncFactory(j.ID)
|
||||
|
||||
// Use default context
|
||||
if rj.context.JobContext == nil {
|
||||
rj.context.JobContext = impl.NewDefaultContext(rj.context.SystemContext)
|
||||
}
|
||||
|
||||
return rj.context.JobContext.Build(jData)
|
||||
}
|
||||
|
||||
func (rj *RedisJob) shouldDisableRetry(j job.Interface, wj *work.Job, cancelled bool) bool {
|
||||
maxFails := j.MaxFails()
|
||||
if maxFails == 0 {
|
||||
maxFails = 4 // Consistent with backend worker pool
|
||||
}
|
||||
fails := wj.Fails
|
||||
fails++ // as this failure has not yet been reported back to the backend pool
|
||||
|
||||
if cancelled && fails < int64(maxFails) {
|
||||
return true
|
||||
}
|
||||
|
||||
if !cancelled && fails < int64(maxFails) && !j.ShouldRetry() {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
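RedisJob.Run above combines two deferred closures: one decides whether retrying should be disabled, the other recovers from panics and records a runtime error. The recover idiom in isolation, with a placeholder job body:

package main

import (
	"fmt"
	"runtime"
)

// safeRun executes jobFn and converts any panic into an error,
// mirroring the recover block in RedisJob.Run.
func safeRun(jobFn func() error) (err error) {
	defer func() {
		if r := recover(); r != nil {
			// Capture the stack for diagnostics, as the wrapper does.
			buf := make([]byte, 1<<16)
			size := runtime.Stack(buf, false)
			err = fmt.Errorf("runtime error: %v\n%s", r, buf[:size])
		}
	}()
	return jobFn()
}

func main() {
	err := safeRun(func() error { panic("boom") })
	fmt.Println(err != nil) // prints true
}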
@ -1,120 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
package pool
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/goharbor/harbor/src/jobservice/job"
|
||||
"github.com/goharbor/harbor/src/jobservice/logger/backend"
|
||||
"github.com/goharbor/harbor/src/jobservice/models"
|
||||
"github.com/goharbor/harbor/src/jobservice/utils"
|
||||
|
||||
"github.com/gocraft/work"
|
||||
|
||||
"github.com/goharbor/harbor/src/jobservice/config"
|
||||
"github.com/goharbor/harbor/src/jobservice/opm"
|
||||
"github.com/goharbor/harbor/src/jobservice/tests"
|
||||
|
||||
"github.com/goharbor/harbor/src/jobservice/env"
|
||||
)
|
||||
|
||||
func TestJobWrapper(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
mgr := opm.NewRedisJobStatsManager(ctx, tests.GiveMeTestNamespace(), rPool)
|
||||
mgr.Start()
|
||||
defer mgr.Shutdown()
|
||||
<-time.After(200 * time.Millisecond)
|
||||
|
||||
var launchJobFunc job.LaunchJobFunc = func(req models.JobRequest) (models.JobStats, error) {
|
||||
return models.JobStats{}, nil
|
||||
}
|
||||
ctx = context.WithValue(ctx, utils.CtlKeyOfLaunchJobFunc, launchJobFunc)
|
||||
envContext := &env.Context{
|
||||
SystemContext: ctx,
|
||||
WG: &sync.WaitGroup{},
|
||||
ErrorChan: make(chan error, 1), // with 1 buffer
|
||||
}
|
||||
deDuplicator := NewRedisDeDuplicator(tests.GiveMeTestNamespace(), rPool)
|
||||
wrapper := NewRedisJob((*fakeParentJob)(nil), envContext, mgr, deDuplicator)
|
||||
j := &work.Job{
|
||||
ID: "FAKE",
|
||||
Name: "DEMO",
|
||||
EnqueuedAt: time.Now().Add(5 * time.Minute).Unix(),
|
||||
}
|
||||
|
||||
oldJobLoggerCfg := config.DefaultConfig.JobLoggerConfigs
|
||||
defer func() {
|
||||
config.DefaultConfig.JobLoggerConfigs = oldJobLoggerCfg
|
||||
}()
|
||||
|
||||
config.DefaultConfig.JobLoggerConfigs = []*config.LoggerConfig{
|
||||
{
|
||||
Name: "STD_OUTPUT",
|
||||
Level: "DEBUG",
|
||||
Settings: map[string]interface{}{
|
||||
"output": backend.StdErr,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "FILE",
|
||||
Level: "ERROR",
|
||||
Settings: map[string]interface{}{
|
||||
"base_dir": os.TempDir(),
|
||||
},
|
||||
Sweeper: &config.LogSweeperConfig{
|
||||
Duration: 5,
|
||||
Settings: map[string]interface{}{
|
||||
"work_dir": os.TempDir(),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if err := wrapper.Run(j); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
type fakeParentJob struct{}
|
||||
|
||||
func (j *fakeParentJob) MaxFails() uint {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (j *fakeParentJob) ShouldRetry() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (j *fakeParentJob) Validate(params map[string]interface{}) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (j *fakeParentJob) Run(ctx env.JobContext, params map[string]interface{}) error {
|
||||
ctx.Checkin("start")
|
||||
ctx.OPCommand()
|
||||
ctx.LaunchJob(models.JobRequest{
|
||||
Job: &models.JobData{
|
||||
Name: "SUB_JOB",
|
||||
Metadata: &models.JobMetadata{
|
||||
JobKind: job.JobKindGeneric,
|
||||
},
|
||||
},
|
||||
})
|
||||
return nil
|
||||
}
|
@ -1,739 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pool
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/gocraft/work"
|
||||
"github.com/goharbor/harbor/src/jobservice/env"
|
||||
"github.com/goharbor/harbor/src/jobservice/job"
|
||||
"github.com/goharbor/harbor/src/jobservice/logger"
|
||||
"github.com/goharbor/harbor/src/jobservice/models"
|
||||
"github.com/goharbor/harbor/src/jobservice/opm"
|
||||
"github.com/goharbor/harbor/src/jobservice/period"
|
||||
"github.com/goharbor/harbor/src/jobservice/utils"
|
||||
"github.com/gomodule/redigo/redis"
|
||||
)
|
||||
|
||||
var (
|
||||
workerPoolDeadTime = 10 * time.Second
|
||||
)
|
||||
|
||||
const (
|
||||
workerPoolStatusHealthy = "Healthy"
|
||||
workerPoolStatusDead = "Dead"
|
||||
|
||||
// Copy from period.enqueuer
|
||||
periodicEnqueuerHorizon = 4 * time.Minute
|
||||
|
||||
pingRedisMaxTimes = 10
|
||||
)
|
||||
|
||||
// GoCraftWorkPool is the pool implementation based on gocraft/work powered by redis.
|
||||
type GoCraftWorkPool struct {
|
||||
namespace string
|
||||
redisPool *redis.Pool
|
||||
pool *work.WorkerPool
|
||||
enqueuer *work.Enqueuer
|
||||
sweeper *period.Sweeper
|
||||
client *work.Client
|
||||
context *env.Context
|
||||
scheduler period.Interface
|
||||
statsManager opm.JobStatsManager
|
||||
messageServer *MessageServer
|
||||
deDuplicator DeDuplicator
|
||||
|
||||
// no need to sync as write once and then only read
|
||||
// key is name of known job
|
||||
// value is the type of known job
|
||||
knownJobs map[string]interface{}
|
||||
}
|
||||
|
||||
// RedisPoolContext ...
|
||||
// We did not use this context to pass context info so far, just a placeholder.
|
||||
type RedisPoolContext struct{}
|
||||
|
||||
// NewGoCraftWorkPool is constructor of goCraftWorkPool.
|
||||
func NewGoCraftWorkPool(ctx *env.Context, namespace string, workerCount uint, redisPool *redis.Pool) *GoCraftWorkPool {
|
||||
pool := work.NewWorkerPool(RedisPoolContext{}, workerCount, namespace, redisPool)
|
||||
enqueuer := work.NewEnqueuer(namespace, redisPool)
|
||||
client := work.NewClient(namespace, redisPool)
|
||||
statsMgr := opm.NewRedisJobStatsManager(ctx.SystemContext, namespace, redisPool)
|
||||
scheduler := period.NewRedisPeriodicScheduler(ctx, namespace, redisPool, statsMgr)
|
||||
sweeper := period.NewSweeper(namespace, redisPool, client)
|
||||
msgServer := NewMessageServer(ctx.SystemContext, namespace, redisPool)
|
||||
deDuplicator := NewRedisDeDuplicator(namespace, redisPool)
|
||||
return &GoCraftWorkPool{
|
||||
namespace: namespace,
|
||||
redisPool: redisPool,
|
||||
pool: pool,
|
||||
enqueuer: enqueuer,
|
||||
scheduler: scheduler,
|
||||
sweeper: sweeper,
|
||||
client: client,
|
||||
context: ctx,
|
||||
statsManager: statsMgr,
|
||||
knownJobs: make(map[string]interface{}),
|
||||
messageServer: msgServer,
|
||||
deDuplicator: deDuplicator,
|
||||
}
|
||||
}
|
||||
|
||||
// Start to serve
|
||||
// Unblock action
|
||||
func (gcwp *GoCraftWorkPool) Start() error {
|
||||
if gcwp.redisPool == nil ||
|
||||
gcwp.pool == nil ||
|
||||
gcwp.context.SystemContext == nil {
|
||||
// report and exit
|
||||
return errors.New("Redis worker pool can not start as it's not correctly configured")
|
||||
}
|
||||
|
||||
// Test the redis connection
|
||||
if err := gcwp.ping(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
done := make(chan interface{}, 1)
|
||||
|
||||
gcwp.context.WG.Add(1)
|
||||
go func() {
|
||||
var err error
|
||||
|
||||
defer func() {
|
||||
gcwp.context.WG.Done()
|
||||
if err != nil {
|
||||
// report error
|
||||
gcwp.context.ErrorChan <- err
|
||||
done <- struct{}{} // exit immediately
|
||||
}
|
||||
}()
|
||||
|
||||
// Register callbacks
|
||||
if err = gcwp.messageServer.Subscribe(period.EventSchedulePeriodicPolicy,
|
||||
func(data interface{}) error {
|
||||
return gcwp.handleSchedulePolicy(data)
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
if err = gcwp.messageServer.Subscribe(period.EventUnSchedulePeriodicPolicy,
|
||||
func(data interface{}) error {
|
||||
return gcwp.handleUnSchedulePolicy(data)
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
if err = gcwp.messageServer.Subscribe(opm.EventRegisterStatusHook,
|
||||
func(data interface{}) error {
|
||||
return gcwp.handleRegisterStatusHook(data)
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
if err = gcwp.messageServer.Subscribe(opm.EventFireCommand,
|
||||
func(data interface{}) error {
|
||||
return gcwp.handleOPCommandFiring(data)
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
startTimes := 0
|
||||
START_MSG_SERVER:
|
||||
// Start message server
|
||||
if err = gcwp.messageServer.Start(); err != nil {
|
||||
logger.Errorf("Message server exits with error: %s\n", err.Error())
|
||||
if startTimes < msgServerRetryTimes {
|
||||
startTimes++
|
||||
time.Sleep(time.Duration((int)(math.Pow(2, (float64)(startTimes)))+5) * time.Second)
|
||||
logger.Infof("Restart message server (%d times)\n", startTimes)
|
||||
goto START_MSG_SERVER
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
gcwp.context.WG.Add(1)
|
||||
go func() {
|
||||
defer func() {
|
||||
gcwp.context.WG.Done()
|
||||
gcwp.statsManager.Shutdown()
|
||||
}()
|
||||
// Start stats manager
|
||||
// Non-blocking
|
||||
gcwp.statsManager.Start()
|
||||
|
||||
// blocking call
|
||||
gcwp.scheduler.Start()
|
||||
}()
|
||||
|
||||
gcwp.context.WG.Add(1)
|
||||
go func() {
|
||||
defer func() {
|
||||
gcwp.context.WG.Done()
|
||||
logger.Infof("Redis worker pool is stopped")
|
||||
}()
|
||||
|
||||
// Clear dirty data before pool starting
|
||||
if err := gcwp.sweeper.ClearOutdatedScheduledJobs(); err != nil {
|
||||
// Only logged
|
||||
logger.Errorf("Clear outdated data before pool starting failed with error:%s\n", err)
|
||||
}
|
||||
|
||||
// Append middlewares
|
||||
gcwp.pool.Middleware((*RedisPoolContext).logJob)
|
||||
|
||||
gcwp.pool.Start()
|
||||
logger.Infof("Redis worker pool is started")
|
||||
|
||||
// Block on listening context and done signal
|
||||
select {
|
||||
case <-gcwp.context.SystemContext.Done():
|
||||
case <-done:
|
||||
}
|
||||
|
||||
gcwp.pool.Stop()
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RegisterJob is used to register the job to the pool.
|
||||
// j is the type of job
|
||||
func (gcwp *GoCraftWorkPool) RegisterJob(name string, j interface{}) error {
|
||||
if utils.IsEmptyStr(name) || j == nil {
|
||||
return errors.New("job can not be registered with empty name or nil interface")
|
||||
}
|
||||
|
||||
// j must be job.Interface
|
||||
if _, ok := j.(job.Interface); !ok {
|
||||
return errors.New("job must implement the job.Interface")
|
||||
}
|
||||
|
||||
// 1:1 constraint
|
||||
if jInList, ok := gcwp.knownJobs[name]; ok {
|
||||
return fmt.Errorf("Job name %s has been already registered with %s", name, reflect.TypeOf(jInList).String())
|
||||
}
|
||||
|
||||
// Same job implementation can be only registered with one name
|
||||
for jName, jInList := range gcwp.knownJobs {
|
||||
jobImpl := reflect.TypeOf(j).String()
|
||||
if reflect.TypeOf(jInList).String() == jobImpl {
|
||||
return fmt.Errorf("Job %s has been already registered with name %s", jobImpl, jName)
|
||||
}
|
||||
}
|
||||
|
||||
redisJob := NewRedisJob(j, gcwp.context, gcwp.statsManager, gcwp.deDuplicator)
|
||||
|
||||
// Get more info from j
|
||||
theJ := Wrap(j)
|
||||
|
||||
gcwp.pool.JobWithOptions(name,
|
||||
work.JobOptions{MaxFails: theJ.MaxFails()},
|
||||
func(job *work.Job) error {
|
||||
return redisJob.Run(job)
|
||||
}, // Use generic handler to handle as we do not accept context with this way.
|
||||
)
|
||||
gcwp.knownJobs[name] = j // keep the name of registered jobs as known jobs for future validation
|
||||
|
||||
logger.Infof("Register job %s with name %s", reflect.TypeOf(j).String(), name)
|
||||
|
||||
return nil
|
||||
}
|
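RegisterJob enforces a 1:1 mapping between names and implementations, as the two checks above show. A hedged sketch of a registrable job, modeled on fakeParentJob from the deleted test earlier in this diff; the job and name are placeholders.

package jobsdemo

import (
	"github.com/goharbor/harbor/src/jobservice/env"
)

// DemoJob satisfies job.Interface; compare fakeParentJob in the test above.
type DemoJob struct{}

func (d *DemoJob) MaxFails() uint    { return 3 }
func (d *DemoJob) ShouldRetry() bool { return true }

func (d *DemoJob) Validate(params map[string]interface{}) error { return nil }

func (d *DemoJob) Run(ctx env.JobContext, params map[string]interface{}) error {
	// Real work goes here; progress can be reported via ctx.Checkin.
	return nil
}

// Registration then binds the name to the type; "DEMO_JOB" is a placeholder:
//
//	if err := gcwp.RegisterJob("DEMO_JOB", (*DemoJob)(nil)); err != nil {
//		// a duplicate name or duplicate implementation lands here
//	}
//
// Note the (*DemoJob)(nil) idiom used elsewhere in this package: only the
// type information is needed at registration time.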
||||
|
||||
// RegisterJobs is used to register multiple jobs to pool.
|
||||
func (gcwp *GoCraftWorkPool) RegisterJobs(jobs map[string]interface{}) error {
|
||||
if len(jobs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
for name, j := range jobs {
|
||||
if err := gcwp.RegisterJob(name, j); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Enqueue job
|
||||
func (gcwp *GoCraftWorkPool) Enqueue(jobName string, params models.Parameters, isUnique bool) (models.JobStats, error) {
|
||||
var (
|
||||
j *work.Job
|
||||
err error
|
||||
)
|
||||
|
||||
// As the job is declared to be unique,
|
||||
// check the uniqueness of the job,
|
||||
// if no duplicated job existing (including the running jobs),
|
||||
// set the unique flag.
|
||||
if isUnique {
|
||||
if err = gcwp.deDuplicator.Unique(jobName, params); err != nil {
|
||||
return models.JobStats{}, err
|
||||
}
|
||||
|
||||
if j, err = gcwp.enqueuer.EnqueueUnique(jobName, params); err != nil {
|
||||
return models.JobStats{}, err
|
||||
}
|
||||
} else {
|
||||
// Enqueue job
|
||||
if j, err = gcwp.enqueuer.Enqueue(jobName, params); err != nil {
|
||||
return models.JobStats{}, err
|
||||
}
|
||||
}
|
||||
|
||||
// avoid backend pool bug
|
||||
if j == nil {
|
||||
return models.JobStats{}, fmt.Errorf("job '%s' can not be enqueued, please check the job metatdata", jobName)
|
||||
}
|
||||
|
||||
res := generateResult(j, job.JobKindGeneric, isUnique)
|
||||
// Save data with async way. Once it fails to do, let it escape
|
||||
// The client method may help if the job is still in progress when get stats of this job
|
||||
gcwp.statsManager.Save(res)
|
||||
|
||||
return res, nil
|
||||
}

// Schedule job
func (gcwp *GoCraftWorkPool) Schedule(jobName string, params models.Parameters, runAfterSeconds uint64, isUnique bool) (models.JobStats, error) {
	var (
		j   *work.ScheduledJob
		err error
	)

	// As the job is declared to be unique,
	// check the uniqueness of the job;
	// if no duplicated job exists (including running jobs),
	// set the unique flag.
	if isUnique {
		if err = gcwp.deDuplicator.Unique(jobName, params); err != nil {
			return models.JobStats{}, err
		}

		if j, err = gcwp.enqueuer.EnqueueUniqueIn(jobName, int64(runAfterSeconds), params); err != nil {
			return models.JobStats{}, err
		}
	} else {
		// Enqueue the job with a delay
		if j, err = gcwp.enqueuer.EnqueueIn(jobName, int64(runAfterSeconds), params); err != nil {
			return models.JobStats{}, err
		}
	}

	// Avoid a backend pool bug
	if j == nil {
		return models.JobStats{}, fmt.Errorf("job '%s' cannot be enqueued, please check the job metadata", jobName)
	}

	res := generateResult(j.Job, job.JobKindScheduled, isUnique)
	res.Stats.RunAt = j.RunAt

	// As the job is already scheduled, this call should not be blocked.
	// If saving fails, the client method can help to get the status of the escaped job.
	gcwp.statsManager.Save(res)

	return res, nil
}

// PeriodicallyEnqueue job
func (gcwp *GoCraftWorkPool) PeriodicallyEnqueue(jobName string, params models.Parameters, cronSetting string) (models.JobStats, error) {
	id, nextRun, err := gcwp.scheduler.Schedule(jobName, params, cronSetting)
	if err != nil {
		return models.JobStats{}, err
	}

	res := models.JobStats{
		Stats: &models.JobStatData{
			JobID:                id,
			JobName:              jobName,
			Status:               job.JobStatusPending,
			JobKind:              job.JobKindPeriodic,
			CronSpec:             cronSetting,
			EnqueueTime:          time.Now().Unix(),
			UpdateTime:           time.Now().Unix(),
			RefLink:              fmt.Sprintf("/api/v1/jobs/%s", id),
			RunAt:                nextRun,
			IsMultipleExecutions: true, // True for periodic job
		},
	}

	gcwp.statsManager.Save(res)

	return res, nil
}
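
// Usage sketch (illustrative only): the three enqueue flavors side by side.
// The six-field cron spec (seconds included) mirrors the one used by the tests
// in this repository; the job name and parameters are made up.
//
//	params := map[string]interface{}{"name": "testing:v1"}
//	// Run as soon as a worker is free
//	s1, _ := wp.Enqueue("MY_SCAN_JOB", params, false)
//	// Run roughly 300 seconds from now
//	s2, _ := wp.Schedule("MY_SCAN_JOB", params, 300, false)
//	// Run at second 10 of every minute
//	s3, _ := wp.PeriodicallyEnqueue("MY_SCAN_JOB", params, "10 * * * * *")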

// GetJobStats returns the job stats of the specified enqueued job.
func (gcwp *GoCraftWorkPool) GetJobStats(jobID string) (models.JobStats, error) {
	if utils.IsEmptyStr(jobID) {
		return models.JobStats{}, errors.New("empty job ID")
	}

	return gcwp.statsManager.Retrieve(jobID)
}

// Stats of pool
func (gcwp *GoCraftWorkPool) Stats() (models.JobPoolStats, error) {
	// Get the status of the worker pool via the client
	hbs, err := gcwp.client.WorkerPoolHeartbeats()
	if err != nil {
		return models.JobPoolStats{}, err
	}

	// Find the heartbeat of this pool via pid
	stats := make([]*models.JobPoolStatsData, 0)
	for _, hb := range hbs {
		if hb.HeartbeatAt == 0 {
			continue // invalid ones
		}

		wPoolStatus := workerPoolStatusHealthy
		if time.Unix(hb.HeartbeatAt, 0).Add(workerPoolDeadTime).Before(time.Now()) {
			wPoolStatus = workerPoolStatusDead
		}
		stat := &models.JobPoolStatsData{
			WorkerPoolID: hb.WorkerPoolID,
			StartedAt:    hb.StartedAt,
			HeartbeatAt:  hb.HeartbeatAt,
			JobNames:     hb.JobNames,
			Concurrency:  hb.Concurrency,
			Status:       wPoolStatus,
		}
		stats = append(stats, stat)
	}

	if len(stats) == 0 {
		return models.JobPoolStats{}, errors.New("failed to get stats of worker pools")
	}

	return models.JobPoolStats{
		Pools: stats,
	}, nil
}
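
// Health-rule sketch (illustrative only): a worker pool is reported as dead
// when its last heartbeat is older than workerPoolDeadTime, a constant defined
// elsewhere in this package.
//
//	lastBeat := time.Unix(hb.HeartbeatAt, 0)
//	isDead := lastBeat.Add(workerPoolDeadTime).Before(time.Now())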

// StopJob will stop the job
func (gcwp *GoCraftWorkPool) StopJob(jobID string) error {
	if utils.IsEmptyStr(jobID) {
		return errors.New("empty job ID")
	}

	theJob, err := gcwp.statsManager.Retrieve(jobID)
	if err != nil {
		return err
	}

	switch theJob.Stats.JobKind {
	case job.JobKindGeneric:
		// Only a running job can be stopped
		if theJob.Stats.Status != job.JobStatusRunning {
			return fmt.Errorf("job '%s' is not a running job", jobID)
		}
	case job.JobKindScheduled:
		// We need to delete the scheduled job from the queue if it is not running yet;
		// otherwise, stop it.
		if theJob.Stats.Status == job.JobStatusPending {
			if err := gcwp.client.DeleteScheduledJob(theJob.Stats.RunAt, jobID); err != nil {
				return err
			}

			// Update the job status to 'stopped'
			gcwp.statsManager.SetJobStatus(jobID, job.JobStatusStopped)

			logger.Debugf("Scheduled job '%s' planned to run at %d is stopped", jobID, theJob.Stats.RunAt)

			return nil
		}
	case job.JobKindPeriodic:
		// Firstly, delete the periodic job policy
		if err := gcwp.scheduler.UnSchedule(jobID); err != nil {
			return err
		}

		logger.Infof("Periodic job policy %s is removed", jobID)

		// Secondly, try to delete the job instances scheduled for this periodic job (a best-effort action)
		if err := gcwp.deleteScheduledJobsOfPeriodicPolicy(theJob.Stats.JobID); err != nil {
			// only logged
			logger.Errorf("Errors happened when deleting jobs of periodic policy %s: %s", theJob.Stats.JobID, err)
		}

		// Thirdly, expire the job stats of this periodic job if they exist
		if err := gcwp.statsManager.ExpirePeriodicJobStats(theJob.Stats.JobID); err != nil {
			// only logged
			logger.Errorf("Expire the stats of job %s failed with error: %s\n", theJob.Stats.JobID, err)
		}

		return nil
	default:
		return fmt.Errorf("job kind %s is not supported", theJob.Stats.JobKind)
	}

	// Check if the job has a 'running' instance
	if theJob.Stats.Status == job.JobStatusRunning {
		// Send the 'stop' ctl command to the running instance
		if err := gcwp.statsManager.SendCommand(jobID, opm.CtlCommandStop, false); err != nil {
			return err
		}
	}

	return nil
}

// CancelJob will cancel the job
func (gcwp *GoCraftWorkPool) CancelJob(jobID string) error {
	if utils.IsEmptyStr(jobID) {
		return errors.New("empty job ID")
	}

	theJob, err := gcwp.statsManager.Retrieve(jobID)
	if err != nil {
		return err
	}

	switch theJob.Stats.JobKind {
	case job.JobKindGeneric:
		if theJob.Stats.Status != job.JobStatusRunning {
			return fmt.Errorf("only a running job can be cancelled, job '%s' seems not to be running now", theJob.Stats.JobID)
		}

		// Send the 'cancel' ctl command to the running instance
		if err := gcwp.statsManager.SendCommand(jobID, opm.CtlCommandCancel, false); err != nil {
			return err
		}
	default:
		return fmt.Errorf("job kind '%s' does not support the 'cancel' operation", theJob.Stats.JobKind)
	}

	return nil
}

// RetryJob retries the job
func (gcwp *GoCraftWorkPool) RetryJob(jobID string) error {
	if utils.IsEmptyStr(jobID) {
		return errors.New("empty job ID")
	}

	theJob, err := gcwp.statsManager.Retrieve(jobID)
	if err != nil {
		return err
	}

	if theJob.Stats.DieAt == 0 {
		return fmt.Errorf("job '%s' is not a retryable job", jobID)
	}

	return gcwp.client.RetryDeadJob(theJob.Stats.DieAt, jobID)
}

// IsKnownJob ...
func (gcwp *GoCraftWorkPool) IsKnownJob(name string) (interface{}, bool) {
	v, ok := gcwp.knownJobs[name]
	return v, ok
}

// ValidateJobParameters ...
func (gcwp *GoCraftWorkPool) ValidateJobParameters(jobType interface{}, params map[string]interface{}) error {
	if jobType == nil {
		return errors.New("nil job type")
	}

	theJ := Wrap(jobType)
	return theJ.Validate(params)
}

// RegisterHook registers a status hook URL
// (sync method)
func (gcwp *GoCraftWorkPool) RegisterHook(jobID string, hookURL string) error {
	if utils.IsEmptyStr(jobID) {
		return errors.New("empty job ID")
	}

	if !utils.IsValidURL(hookURL) {
		return errors.New("invalid hook URL")
	}

	return gcwp.statsManager.RegisterHook(jobID, hookURL, false)
}

// A best-effort method to delete the scheduled jobs of one periodic job policy
func (gcwp *GoCraftWorkPool) deleteScheduledJobsOfPeriodicPolicy(policyID string) error {
	// Check the scope of [-periodicEnqueuerHorizon, -1].
	// If the job is still not completed after one 'periodicEnqueuerHorizon', just ignore it.
	now := time.Now().Unix() // baseline
	startTime := now - (int64)(periodicEnqueuerHorizon/time.Minute)*60

	// Try to delete more:
	// get the range scope
	start := (opm.Range)(startTime)
	ids, err := gcwp.statsManager.GetExecutions(policyID, start)
	if err != nil {
		return err
	}

	logger.Debugf("Found scheduled jobs '%v' in scope [%d,+inf] for periodic job policy %s", ids, start, policyID)

	if len(ids) == 0 {
		// Treat as a normal case, nothing needs to be done
		return nil
	}

	multiErrs := []string{}
	for _, id := range ids {
		subJob, err := gcwp.statsManager.Retrieve(id)
		if err != nil {
			multiErrs = append(multiErrs, err.Error())
			continue // go on
		}

		if subJob.Stats.Status == job.JobStatusRunning {
			// Send the 'stop' ctl command to the running instance
			if err := gcwp.statsManager.SendCommand(subJob.Stats.JobID, opm.CtlCommandStop, false); err != nil {
				multiErrs = append(multiErrs, err.Error())
				continue
			}

			logger.Debugf("Stop running job %s for periodic job policy %s", subJob.Stats.JobID, policyID)
		} else {
			if subJob.Stats.JobKind == job.JobKindScheduled &&
				subJob.Stats.Status == job.JobStatusPending {
				// The pending scheduled job
				if err := gcwp.client.DeleteScheduledJob(subJob.Stats.RunAt, subJob.Stats.JobID); err != nil {
					multiErrs = append(multiErrs, err.Error())
					continue // go on
				}

				// Log the action
				logger.Debugf("Delete scheduled job for periodic job policy %s: runat = %d", policyID, subJob.Stats.RunAt)
			}
		}
	}

	if len(multiErrs) > 0 {
		return errors.New(strings.Join(multiErrs, "\n"))
	}

	return nil
}

func (gcwp *GoCraftWorkPool) handleSchedulePolicy(data interface{}) error {
	if data == nil {
		return errors.New("nil data interface")
	}

	pl, ok := data.(*period.PeriodicJobPolicy)
	if !ok {
		return errors.New("malformed policy object")
	}

	return gcwp.scheduler.AcceptPeriodicPolicy(pl)
}

func (gcwp *GoCraftWorkPool) handleUnSchedulePolicy(data interface{}) error {
	if data == nil {
		return errors.New("nil data interface")
	}

	pl, ok := data.(*period.PeriodicJobPolicy)
	if !ok {
		return errors.New("malformed policy object")
	}

	removed := gcwp.scheduler.RemovePeriodicPolicy(pl.PolicyID)
	if removed == nil {
		return errors.New("nothing removed")
	}

	return nil
}

func (gcwp *GoCraftWorkPool) handleRegisterStatusHook(data interface{}) error {
	if data == nil {
		return errors.New("nil data interface")
	}

	hook, ok := data.(*opm.HookData)
	if !ok {
		return errors.New("malformed hook object")
	}

	return gcwp.statsManager.RegisterHook(hook.JobID, hook.HookURL, true)
}

func (gcwp *GoCraftWorkPool) handleOPCommandFiring(data interface{}) error {
	if data == nil {
		return errors.New("nil data interface")
	}

	commands, ok := data.([]interface{})
	if !ok || len(commands) != 2 {
		return errors.New("malformed op commands object")
	}
	jobID, ok := commands[0].(string)
	if !ok {
		return errors.New("malformed op command info")
	}
	command, ok := commands[1].(string)
	if !ok {
		return errors.New("malformed op command info")
	}

	// Put the command into the maintaining list
	return gcwp.statsManager.SendCommand(jobID, command, true)
}
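
// Payload sketch (illustrative only): the op-command message handled above is
// expected to be a two-element slice of strings; the job ID below is made up.
//
//	data := []interface{}{"9ddbe8fbde1d0611e581", opm.CtlCommandStop}
//	err := gcwp.handleOPCommandFiring(data) // forwards the command via statsManager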

// Log the job
func (rpc *RedisPoolContext) logJob(job *work.Job, next work.NextMiddlewareFunc) error {
	logger.Infof("Job incoming: %s:%s", job.Name, job.ID)
	return next()
}

// Ping the redis server
func (gcwp *GoCraftWorkPool) ping() error {
	conn := gcwp.redisPool.Get()
	defer conn.Close()

	var err error
	for count := 1; count <= pingRedisMaxTimes; count++ {
		if _, err = conn.Do("ping"); err == nil {
			return nil
		}

		// Linear backoff: wait 5s, 6s, 7s, ... between attempts
		time.Sleep(time.Duration(count+4) * time.Second)
	}

	return fmt.Errorf("connect to redis server timeout: %s", err.Error())
}
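
// Backoff note (an added observation, not from the original code): with the
// linear schedule above, the worst-case total wait before giving up is
// sum(k+4) for k = 1..pingRedisMaxTimes seconds, i.e. N*(N+9)/2 seconds for
// N attempts (35 seconds when N = 5). pingRedisMaxTimes is defined elsewhere
// in this package.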

// Generate the job stats data
func generateResult(j *work.Job, jobKind string, isUnique bool) models.JobStats {
	if j == nil {
		return models.JobStats{}
	}

	return models.JobStats{
		Stats: &models.JobStatData{
			JobID:       j.ID,
			JobName:     j.Name,
			JobKind:     jobKind,
			IsUnique:    isUnique,
			Status:      job.JobStatusPending,
			EnqueueTime: j.EnqueuedAt,
			UpdateTime:  time.Now().Unix(),
			RefLink:     fmt.Sprintf("/api/v1/jobs/%s", j.ID),
		},
	}
}
@ -1,567 +0,0 @@

// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package pool

import (
	"context"
	"errors"
	"reflect"
	"sync"
	"testing"
	"time"

	"github.com/goharbor/harbor/src/jobservice/errs"
	"github.com/goharbor/harbor/src/jobservice/job"
	"github.com/goharbor/harbor/src/jobservice/logger"
	"github.com/goharbor/harbor/src/jobservice/models"
	"github.com/goharbor/harbor/src/jobservice/opm"

	"github.com/goharbor/harbor/src/jobservice/tests"

	"github.com/goharbor/harbor/src/jobservice/env"
)

var rPool = tests.GiveMeRedisPool()

func TestRegisterJob(t *testing.T) {
	wp, _, _ := createRedisWorkerPool()
	defer func() {
		if err := tests.ClearAll(tests.GiveMeTestNamespace(), rPool.Get()); err != nil {
			t.Error(err)
		}
	}()

	if err := wp.RegisterJob("fake_job", (*fakeJob)(nil)); err != nil {
		t.Error(err)
	}

	if _, ok := wp.IsKnownJob("fake_job"); !ok {
		t.Error("expected known job but registering 'fake_job' appears to have failed")
	}

	delete(wp.knownJobs, "fake_job")

	jobs := make(map[string]interface{})
	jobs["fake_job_1st"] = (*fakeJob)(nil)
	if err := wp.RegisterJobs(jobs); err != nil {
		t.Error(err)
	}

	params := make(map[string]interface{})
	params["name"] = "testing:v1"
	if err := wp.ValidateJobParameters((*fakeJob)(nil), params); err != nil {
		t.Error(err)
	}
}

func TestEnqueueJob(t *testing.T) {
	wp, sysCtx, cancel := createRedisWorkerPool()
	defer func() {
		if err := tests.ClearAll(tests.GiveMeTestNamespace(), rPool.Get()); err != nil {
			t.Error(err)
		}
	}()
	defer cancel()

	if err := wp.RegisterJob("fake_job", (*fakeJob)(nil)); err != nil {
		t.Error(err)
	}
	if err := wp.RegisterJob("fake_unique_job", (*fakeUniqueJob)(nil)); err != nil {
		t.Error(err)
	}

	go wp.Start()
	time.Sleep(1 * time.Second)

	params := make(map[string]interface{})
	params["name"] = "testing:v1"
	stats, err := wp.Enqueue("fake_job", params, false)
	if err != nil {
		t.Error(err)
	}
	if stats.Stats.JobID == "" {
		t.Error("expected a non-empty job ID but got an empty one")
	}

	runAt := time.Now().Unix() + 20
	stats, err = wp.Schedule("fake_job", params, 20, false)
	if err != nil {
		t.Error(err)
	}

	if stats.Stats.RunAt > 0 && stats.Stats.RunAt < runAt {
		t.Errorf("expected the returned 'RunAt' to be >= '%d' but it seems not to be", runAt)
	}

	stats, err = wp.Enqueue("fake_unique_job", params, true)
	if err != nil {
		t.Error(err)
	}
	if stats.Stats.JobID == "" {
		t.Error("expected a non-empty job ID but got an empty one")
	}

	cancel()
	sysCtx.WG.Wait()
}

func TestEnqueuePeriodicJob(t *testing.T) {
	wp, _, cancel := createRedisWorkerPool()
	defer func() {
		if err := tests.ClearAll(tests.GiveMeTestNamespace(), rPool.Get()); err != nil {
			t.Error(err)
		}
	}()
	defer cancel()

	if err := wp.RegisterJob("fake_job", (*fakeJob)(nil)); err != nil {
		t.Error(err)
	}

	go wp.Start()
	time.Sleep(1 * time.Second)

	params := make(map[string]interface{})
	params["name"] = "testing:v1"
	jobStats, err := wp.PeriodicallyEnqueue("fake_job", params, "10 * * * * *")
	if err != nil {
		t.Error(err)
	}
	<-time.After(1 * time.Second)

	jStats, err := wp.GetJobStats(jobStats.Stats.JobID)
	if err != nil {
		t.Error(err)
	}

	if jobStats.Stats.JobName != jStats.Stats.JobName {
		t.Error("expected the same job stats but got different ones")
	}

	if err := wp.StopJob(jStats.Stats.JobID); err != nil {
		t.Error(err)
	}

	// cancel()
	// <-time.After(1 * time.Second)
}

func TestPoolStats(t *testing.T) {
	wp, _, cancel := createRedisWorkerPool()
	defer func() {
		if err := tests.ClearAll(tests.GiveMeTestNamespace(), rPool.Get()); err != nil {
			t.Error(err)
		}
	}()
	defer cancel()

	go wp.Start()
	time.Sleep(1 * time.Second)

	_, err := wp.Stats()
	if err != nil {
		t.Fatal(err)
	}
}

func TestStopJob(t *testing.T) {
	wp, _, cancel := createRedisWorkerPool()
	defer func() {
		if err := tests.ClearAll(tests.GiveMeTestNamespace(), rPool.Get()); err != nil {
			t.Error(err)
		}
	}()
	defer cancel()

	if err := wp.RegisterJob("fake_long_run_job", (*fakeRunnableJob)(nil)); err != nil {
		t.Error(err)
	}

	go wp.Start()
	time.Sleep(1 * time.Second)

	// Stop generic job
	params := make(map[string]interface{})
	params["name"] = "testing:v1"

	genericJob, err := wp.Enqueue("fake_long_run_job", params, false)
	if err != nil {
		t.Fatal(err)
	}
	time.Sleep(200 * time.Millisecond)
	stats, err := wp.GetJobStats(genericJob.Stats.JobID)
	if err != nil {
		t.Fatal(err)
	}
	if stats.Stats.Status != job.JobStatusRunning {
		t.Fatalf("expected a running job but got %s", stats.Stats.Status)
	}
	if err := wp.StopJob(genericJob.Stats.JobID); err != nil {
		t.Fatal(err)
	}

	// Stop scheduled job
	scheduledJob, err := wp.Schedule("fake_long_run_job", params, 120, false)
	if err != nil {
		t.Fatal(err)
	}
	time.Sleep(200 * time.Millisecond)
	if err := wp.StopJob(scheduledJob.Stats.JobID); err != nil {
		t.Fatal(err)
	}
}

func TestCancelJob(t *testing.T) {
	wp, _, cancel := createRedisWorkerPool()
	defer func() {
		if err := tests.ClearAll(tests.GiveMeTestNamespace(), rPool.Get()); err != nil {
			t.Error(err)
		}
	}()
	defer cancel()

	if err := wp.RegisterJob("fake_long_run_job", (*fakeRunnableJob)(nil)); err != nil {
		t.Error(err)
	}

	go wp.Start()
	time.Sleep(1 * time.Second)

	// Cancel job
	params := make(map[string]interface{})
	params["name"] = "testing:v1"

	genericJob, err := wp.Enqueue("fake_long_run_job", params, false)
	if err != nil {
		t.Fatal(err)
	}
	time.Sleep(200 * time.Millisecond)
	stats, err := wp.GetJobStats(genericJob.Stats.JobID)
	if err != nil {
		t.Fatal(err)
	}
	if stats.Stats.Status != job.JobStatusRunning {
		t.Fatalf("expected a running job but got %s", stats.Stats.Status)
	}

	if err := wp.CancelJob(genericJob.Stats.JobID); err != nil {
		t.Fatal(err)
	}
	time.Sleep(3 * time.Second)

	stats, err = wp.GetJobStats(genericJob.Stats.JobID)
	if err != nil {
		t.Fatal(err)
	}
	if stats.Stats.Status != job.JobStatusCancelled {
		t.Fatalf("expected a cancelled job but got %s", stats.Stats.Status)
	}

	if err := wp.RetryJob(genericJob.Stats.JobID); err != nil {
		t.Fatal(err)
	}
}

/*func TestCancelAndRetryJobWithHook(t *testing.T) {
	wp, _, cancel := createRedisWorkerPool()
	defer func() {
		if err := tests.ClearAll(tests.GiveMeTestNamespace(), rPool.Get()); err != nil {
			t.Fatal(err)
		}
	}()
	defer cancel()

	if err := wp.RegisterJob("fake_runnable_job", (*fakeRunnableJob)(nil)); err != nil {
		t.Fatal(err)
	}

	go wp.Start()
	time.Sleep(1 * time.Second)

	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	}))
	defer ts.Close()

	params := make(map[string]interface{})
	params["name"] = "testing:v1"
	res, err := wp.Enqueue("fake_runnable_job", params, false)
	if err != nil {
		t.Fatal(err)
	}
	if err := wp.RegisterHook(res.Stats.JobID, ts.URL); err != nil {
		t.Fatal(err)
	}
	// make sure it's running
	timer := time.NewTimer(1 * time.Second)
	defer timer.Stop()

CHECK:
	<-timer.C
	if check, err := wp.GetJobStats(res.Stats.JobID); err != nil {
		t.Fatal(err)
	} else {
		if check.Stats.Status != job.JobStatusRunning {
			timer.Reset(1 * time.Second)
			goto CHECK
		}
	}

	// cancel
	if err := wp.CancelJob(res.Stats.JobID); err != nil {
		t.Fatal(err)
	}
	<-time.After(5 * time.Second)
	updatedRes, err := wp.GetJobStats(res.Stats.JobID)
	if err != nil {
		t.Fatal(err)
	}
	if updatedRes.Stats.Status != job.JobStatusCancelled {
		t.Fatalf("expected job status '%s' but got '%s'\n", job.JobStatusCancelled, updatedRes.Stats.Status)
	}
	if updatedRes.Stats.DieAt == 0 {
		t.Fatalf("expected a non-zero 'DieAt' but got a 0 value")
	}

	// retry
	if err := wp.RetryJob(updatedRes.Stats.JobID); err != nil {
		t.Fatal(err)
	}
}*/

func createRedisWorkerPool() (*GoCraftWorkPool, *env.Context, context.CancelFunc) {
	ctx := context.Background()
	ctx, cancel := context.WithCancel(ctx)
	envCtx := &env.Context{
		SystemContext: ctx,
		WG:            new(sync.WaitGroup),
		ErrorChan:     make(chan error, 1),
		JobContext:    newContext(ctx),
	}

	return NewGoCraftWorkPool(envCtx, tests.GiveMeTestNamespace(), 3, rPool), envCtx, cancel
}

type fakeJob struct{}

func (j *fakeJob) MaxFails() uint {
	return 3
}

func (j *fakeJob) ShouldRetry() bool {
	return true
}

func (j *fakeJob) Validate(params map[string]interface{}) error {
	if p, ok := params["name"]; ok {
		if p == "testing:v1" {
			return nil
		}
	}

	return errors.New("testing error")
}

func (j *fakeJob) Run(ctx env.JobContext, params map[string]interface{}) error {
	return nil
}

type fakeUniqueJob struct{}

func (j *fakeUniqueJob) MaxFails() uint {
	return 3
}

func (j *fakeUniqueJob) ShouldRetry() bool {
	return true
}

func (j *fakeUniqueJob) Validate(params map[string]interface{}) error {
	if p, ok := params["name"]; ok {
		if p == "testing:v1" {
			return nil
		}
	}

	return errors.New("testing error")
}

func (j *fakeUniqueJob) Run(ctx env.JobContext, params map[string]interface{}) error {
	return nil
}

type fakeRunnableJob struct{}

func (j *fakeRunnableJob) MaxFails() uint {
	return 2
}

func (j *fakeRunnableJob) ShouldRetry() bool {
	return true
}

func (j *fakeRunnableJob) Validate(params map[string]interface{}) error {
	if p, ok := params["name"]; ok {
		if p == "testing:v1" {
			return nil
		}
	}

	return errors.New("testing error")
}

func (j *fakeRunnableJob) Run(ctx env.JobContext, params map[string]interface{}) error {
	tk := time.NewTicker(200 * time.Millisecond)
	defer tk.Stop()

	for {
		select {
		case <-tk.C:
			cmd, ok := ctx.OPCommand()
			if ok {
				if cmd == opm.CtlCommandStop {
					return errs.JobStoppedError()
				}

				return errs.JobCancelledError()
			}
		case <-ctx.SystemContext().Done():
			return nil
		case <-time.After(1 * time.Minute):
			return errors.New("fake job timeout")
		}
	}
}

type fakeContext struct {
	// System context
	sysContext context.Context

	// op command func
	opCommandFunc job.CheckOPCmdFunc

	// checkin func
	checkInFunc job.CheckInFunc

	// launch job
	launchJobFunc job.LaunchJobFunc

	// other required information
	properties map[string]interface{}
}

func newContext(sysCtx context.Context) *fakeContext {
	return &fakeContext{
		sysContext: sysCtx,
		properties: make(map[string]interface{}),
	}
}

// Build implements the same method in the env.JobContext interface.
// This func will build the job execution context before running.
func (c *fakeContext) Build(dep env.JobData) (env.JobContext, error) {
	jContext := &fakeContext{
		sysContext: c.sysContext,
		properties: make(map[string]interface{}),
	}

	// Copy properties
	if len(c.properties) > 0 {
		for k, v := range c.properties {
			jContext.properties[k] = v
		}
	}

	if opCommandFunc, ok := dep.ExtraData["opCommandFunc"]; ok {
		if reflect.TypeOf(opCommandFunc).Kind() == reflect.Func {
			if funcRef, ok := opCommandFunc.(job.CheckOPCmdFunc); ok {
				jContext.opCommandFunc = funcRef
			}
		}
	}
	if jContext.opCommandFunc == nil {
		return nil, errors.New("failed to inject opCommandFunc")
	}

	if checkInFunc, ok := dep.ExtraData["checkInFunc"]; ok {
		if reflect.TypeOf(checkInFunc).Kind() == reflect.Func {
			if funcRef, ok := checkInFunc.(job.CheckInFunc); ok {
				jContext.checkInFunc = funcRef
			}
		}
	}

	if jContext.checkInFunc == nil {
		return nil, errors.New("failed to inject checkInFunc")
	}

	if launchJobFunc, ok := dep.ExtraData["launchJobFunc"]; ok {
		if reflect.TypeOf(launchJobFunc).Kind() == reflect.Func {
			if funcRef, ok := launchJobFunc.(job.LaunchJobFunc); ok {
				jContext.launchJobFunc = funcRef
			}
		}
	}

	if jContext.launchJobFunc == nil {
		return nil, errors.New("failed to inject launchJobFunc")
	}

	return jContext, nil
}
// Get implements the same method in the env.JobContext interface
func (c *fakeContext) Get(prop string) (interface{}, bool) {
	v, ok := c.properties[prop]
	return v, ok
}

// SystemContext implements the same method in the env.JobContext interface
func (c *fakeContext) SystemContext() context.Context {
	return c.sysContext
}

// Checkin is a bridge func for reporting detailed status
func (c *fakeContext) Checkin(status string) error {
	if c.checkInFunc != nil {
		c.checkInFunc(status)
	} else {
		return errors.New("nil check in function")
	}

	return nil
}

// OPCommand returns the control operational command (like stop/cancel) if there is any
func (c *fakeContext) OPCommand() (string, bool) {
	if c.opCommandFunc != nil {
		return c.opCommandFunc()
	}

	return "", false
}

// GetLogger returns the logger
func (c *fakeContext) GetLogger() logger.Interface {
	return nil
}

// LaunchJob launches sub jobs
func (c *fakeContext) LaunchJob(req models.JobRequest) (models.JobStats, error) {
	if c.launchJobFunc == nil {
		return models.JobStats{}, errors.New("nil launch job function")
	}

	return c.launchJobFunc(req)
}
196 src/jobservice/runner/redis.go Normal file
@ -0,0 +1,196 @@

// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package runner

import (
	"fmt"
	"runtime"
	"time"

	"github.com/gocraft/work"
	"github.com/goharbor/harbor/src/jobservice/env"
	"github.com/goharbor/harbor/src/jobservice/job"
	"github.com/goharbor/harbor/src/jobservice/job/impl"
	"github.com/goharbor/harbor/src/jobservice/lcm"
	"github.com/goharbor/harbor/src/jobservice/logger"
	"github.com/pkg/errors"
)

// RedisJob is a job wrapper to wrap the job.Interface to the style which can be recognized by the redis worker.
type RedisJob struct {
	job     interface{}    // the real job implementation
	context *env.Context   // context
	ctl     lcm.Controller // life cycle controller
}

// NewRedisJob is the constructor of RedisJob
func NewRedisJob(job interface{}, ctx *env.Context, ctl lcm.Controller) *RedisJob {
	return &RedisJob{
		job:     job,
		context: ctx,
		ctl:     ctl,
	}
}

// Run the job
func (rj *RedisJob) Run(j *work.Job) (err error) {
	var (
		runningJob  job.Interface
		execContext job.Context
		tracker     job.Tracker
		markStopped = bp(false)
	)

	// Defer to log the exit result
	defer func() {
		if !*markStopped {
			if err == nil {
				logger.Infof("|^_^| Job '%s:%s' exit with success", j.Name, j.ID)
			} else {
				// log error
				logger.Errorf("|@_@| Job '%s:%s' exit with error: %s", j.Name, j.ID, err)
			}
		}
	}()

	// Track the running job now
	jID := j.ID
	if isPeriodicJobExecution(j) {
		jID = fmt.Sprintf("%s@%d", j.ID, j.EnqueuedAt)
	}

	if tracker, err = rj.ctl.Track(jID); err != nil {
		// As the tracker creation failed, there is no way to mark the job status change.
		// Also, a non-nil error return consumes a fail. If all retries fail here,
		// the job will become a zombie (pending forever).
		// So avoid letting the job consume a fail here and let it retry again and again.
		// However, to avoid retrying forever, check the FailedAt timestamp.
		now := time.Now().Unix()
		if j.FailedAt == 0 || now-j.FailedAt < 2*24*3600 {
			j.Fails--
		}

		return
	}

	if job.RunningStatus.Compare(job.Status(tracker.Job().Info.Status)) <= 0 {
		// The job has probably been stopped by directly marking its status as stopped.
		// Exit directly, with no retry.
		markStopped = bp(true)
		return nil
	}

	// Defer to switch the status
	defer func() {
		// Switch the job status based on the returned error.
		// An error happening here should not override the job run error, so just log it.
		if err != nil {
			if er := tracker.Fail(); er != nil {
				logger.Errorf("Mark job status to failure error: %s", er)
			}

			return
		}

		// A nil error might be returned by a stopped job. Check the latest status here.
		// If refreshing the latest status fails, let the process go on to avoid missing the status update.
		if latest, er := tracker.Status(); er == nil {
			if latest == job.StoppedStatus {
				// Logged
				logger.Infof("Job %s:%s is stopped", tracker.Job().Info.JobName, tracker.Job().Info.JobID)
				// Stopped job, no exit message printing.
				markStopped = bp(true)
				return
			}
		}

		// Mark the job status as success.
		if er := tracker.Succeed(); er != nil {
			logger.Errorf("Mark job status to success error: %s", er)
		}
	}()

	// Defer to handle runtime error
	defer func() {
		if r := recover(); r != nil {
			// Log the stack
			buf := make([]byte, 1<<10)
			size := runtime.Stack(buf, false)
			err = errors.Errorf("runtime error: %s; stack: %s", r, buf[0:size])
			logger.Errorf("Run job %s:%s error: %s", j.Name, j.ID, err)
		}
	}()

	// Build job context
	if rj.context.JobContext == nil {
		rj.context.JobContext = impl.NewDefaultContext(rj.context.SystemContext)
	}
	if execContext, err = rj.context.JobContext.Build(tracker); err != nil {
		return
	}

	// Defer to close the logger stream
	defer func() {
		// Close the open io stream first
		if closer, ok := execContext.GetLogger().(logger.Closer); ok {
			if er := closer.Close(); er != nil {
				logger.Errorf("Close job logger failed: %s", er)
			}
		}
	}()

	// Wrap the job
	runningJob = Wrap(rj.job)
	// Set the status to run
	if err = tracker.Run(); err != nil {
		return
	}
	// Run the job
	err = runningJob.Run(execContext, j.Args)
	// Handle retry
	rj.retry(runningJob, j)
	// Handle periodic job execution
	if isPeriodicJobExecution(j) {
		if er := tracker.PeriodicExecutionDone(); er != nil {
			// Just log it
			logger.Error(er)
		}
	}

	return
}

func (rj *RedisJob) retry(j job.Interface, wj *work.Job) {
	if !j.ShouldRetry() {
		// Cancel retry immediately:
		// make the fail count big enough to avoid retrying
		wj.Fails = 10000000000
		return
	}
}

func isPeriodicJobExecution(j *work.Job) bool {
	if isPeriodic, ok := j.Args["_job_kind_periodic_"]; ok {
		if isPeriodicV, yes := isPeriodic.(bool); yes && isPeriodicV {
			return true
		}
	}

	return false
}

func bp(b bool) *bool {
	return &b
}
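
// Marker sketch (illustrative only): an execution spawned from a periodic
// policy is recognized by a boolean flag in the job arguments, presumably
// injected by the periodic enqueuer (not shown in this diff).
//
//	j := &work.Job{
//		ID:   "FAKE-j",
//		Args: map[string]interface{}{"_job_kind_periodic_": true},
//	}
//	isPeriodicJobExecution(j) // true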
229 src/jobservice/runner/redis_test.go Normal file
@ -0,0 +1,229 @@

// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package runner

import (
	"context"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/gocraft/work"
	"github.com/goharbor/harbor/src/jobservice/config"
	"github.com/goharbor/harbor/src/jobservice/env"
	"github.com/goharbor/harbor/src/jobservice/job"
	"github.com/goharbor/harbor/src/jobservice/lcm"
	"github.com/goharbor/harbor/src/jobservice/logger/backend"
	"github.com/goharbor/harbor/src/jobservice/tests"
	"github.com/gomodule/redigo/redis"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"
)

// RedisRunnerTestSuite tests the functions of the redis runner
type RedisRunnerTestSuite struct {
	suite.Suite

	lcmCtl lcm.Controller

	envContext *env.Context

	cancel    context.CancelFunc
	namespace string
	pool      *redis.Pool
}

// TestRedisRunnerTestSuite is the entry of go test
func TestRedisRunnerTestSuite(t *testing.T) {
	suite.Run(t, new(RedisRunnerTestSuite))
}

// SetupSuite prepares the test suite
func (suite *RedisRunnerTestSuite) SetupSuite() {
	ctx, cancel := context.WithCancel(context.Background())
	suite.cancel = cancel

	suite.envContext = &env.Context{
		SystemContext: ctx,
		WG:            new(sync.WaitGroup),
		ErrorChan:     make(chan error, 1),
	}

	suite.namespace = tests.GiveMeTestNamespace()
	suite.pool = tests.GiveMeRedisPool()

	suite.lcmCtl = lcm.NewController(
		suite.envContext,
		suite.namespace,
		suite.pool,
		func(hookURL string, change *job.StatusChange) error { return nil },
	)

	fakeStats := &job.Stats{
		Info: &job.StatsInfo{
			JobID:    "FAKE-j",
			JobName:  "fakeParentJob",
			JobKind:  job.KindGeneric,
			Status:   job.PendingStatus.String(),
			IsUnique: false,
		},
	}
	_, err := suite.lcmCtl.New(fakeStats)
	require.NoError(suite.T(), err, "lcm new: nil error expected but got %s", err)
}

// SetupTest prepares the test cases
func (suite *RedisRunnerTestSuite) SetupTest() {
	t, err := suite.lcmCtl.Track("FAKE-j")
	require.NoError(suite.T(), err)
	err = t.Update("status", job.PendingStatus.String()) // reset
	assert.NoError(suite.T(), err)
}

// TearDownSuite clears the test suite
func (suite *RedisRunnerTestSuite) TearDownSuite() {
	suite.cancel()
}

// TestJobWrapper tests the redis job wrapper
func (suite *RedisRunnerTestSuite) TestJobWrapper() {
	j := &work.Job{
		ID:         "FAKE-j",
		Name:       "fakeParentJob",
		EnqueuedAt: time.Now().Add(5 * time.Minute).Unix(),
	}

	oldJobLoggerCfg := config.DefaultConfig.JobLoggerConfigs
	defer func() {
		config.DefaultConfig.JobLoggerConfigs = oldJobLoggerCfg
	}()

	config.DefaultConfig.JobLoggerConfigs = []*config.LoggerConfig{
		{
			Name:  "STD_OUTPUT",
			Level: "DEBUG",
			Settings: map[string]interface{}{
				"output": backend.StdErr,
			},
		},
		{
			Name:  "FILE",
			Level: "ERROR",
			Settings: map[string]interface{}{
				"base_dir": os.TempDir(),
			},
			Sweeper: &config.LogSweeperConfig{
				Duration: 5,
				Settings: map[string]interface{}{
					"work_dir": os.TempDir(),
				},
			},
		},
	}

	redisJob := NewRedisJob((*fakeParentJob)(nil), suite.envContext, suite.lcmCtl)
	err := redisJob.Run(j)
	require.NoError(suite.T(), err, "redis job: nil error expected but got %s", err)
}

// TestJobWrapperInvalidTracker tests the job runner with an invalid job ID
func (suite *RedisRunnerTestSuite) TestJobWrapperInvalidTracker() {
	j := &work.Job{
		ID:         "FAKE-j2",
		Name:       "fakeParentJob",
		EnqueuedAt: time.Now().Add(5 * time.Minute).Unix(),
		Fails:      3,
	}

	redisJob := NewRedisJob((*fakeParentJob)(nil), suite.envContext, suite.lcmCtl)
	err := redisJob.Run(j)
	require.Error(suite.T(), err, "redis job: non-nil error expected but got nil")
	assert.Equal(suite.T(), int64(2), j.Fails)
}

// TestJobWrapperPanic tests the job runner panic case
func (suite *RedisRunnerTestSuite) TestJobWrapperPanic() {
	j := &work.Job{
		ID:         "FAKE-j",
		Name:       "fakePanicJob",
		EnqueuedAt: time.Now().Add(5 * time.Minute).Unix(),
	}

	redisJob := NewRedisJob((*fakePanicJob)(nil), suite.envContext, suite.lcmCtl)
	err := redisJob.Run(j)
	assert.Error(suite.T(), err)
}

// TestJobWrapperStopped tests the stopped-job path of the runner
func (suite *RedisRunnerTestSuite) TestJobWrapperStopped() {
	j := &work.Job{
		ID:         "FAKE-j",
		Name:       "fakePanicJob",
		EnqueuedAt: time.Now().Add(5 * time.Minute).Unix(),
	}

	t, err := suite.lcmCtl.Track("FAKE-j")
	require.NoError(suite.T(), err)
	err = t.Stop()
	require.NoError(suite.T(), err)

	redisJob := NewRedisJob((*fakeParentJob)(nil), suite.envContext, suite.lcmCtl)
	err = redisJob.Run(j)
	require.NoError(suite.T(), err)
}

type fakeParentJob struct{}

func (j *fakeParentJob) MaxFails() uint {
	return 1
}

func (j *fakeParentJob) ShouldRetry() bool {
	return false
}

func (j *fakeParentJob) Validate(params job.Parameters) error {
	return nil
}

func (j *fakeParentJob) Run(ctx job.Context, params job.Parameters) error {
	_ = ctx.Checkin("start")
	ctx.OPCommand()
	return nil
}

type fakePanicJob struct{}

func (j *fakePanicJob) MaxFails() uint {
	return 1
}

func (j *fakePanicJob) ShouldRetry() bool {
	return false
}

func (j *fakePanicJob) Validate(params job.Parameters) error {
	return nil
}

func (j *fakePanicJob) Run(ctx job.Context, params job.Parameters) error {
	panic("for testing")
}
@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

package pool
package runner

import (
	"reflect"
Some files were not shown because too many files have changed in this diff