resolve conflict

Tan Jiang 2018-03-26 18:11:39 +08:00
commit 582deea9e7
21 changed files with 745 additions and 422 deletions

View File

@ -3,12 +3,12 @@ package job
const (
//ImageScanJob is the name of the scan job; it is used as the key to register to the job service.
ImageScanJob = "IMAGE_SCAN"
// ImageReplicationTransfer : the name of replication transfer job in job service
ImageReplicationTransfer = "IMAGE_REPLICATION_TRANSFER"
// ImageReplicationDelete : the name of replication delete job in job service
ImageReplicationDelete = "IMAGE_REPLICATION_DELETE"
// ImagePeriodReplication : the name of period replication job in job service
ImagePeriodReplication = "IMAGE_PERIOD_REPLICATION"
// GenericKind marks the job as a generic job; it is contained in the job metadata and passed to the job service.
GenericKind = "Generic"
// ImageTransfer : the name of image transfer job in job service
ImageTransfer = "IMAGE_TRANSFER"
// ImageDelete : the name of image delete job in job service
ImageDelete = "IMAGE_DELETE"
// ImageReplicate : the name of image replicate job in job service
ImageReplicate = "IMAGE_REPLICATE"
)
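
These constants are the keys under which job implementations get registered with the worker pool. A minimal sketch of that wiring, mirroring the RegisterJobs call made in the bootstrap change later in this commit (redisWorkerPool is assumed to be an already constructed worker pool):

// Sketch: map the job name constants to their implementations.
if err := redisWorkerPool.RegisterJobs(
	map[string]interface{}{
		job.ImageScanJob:   (*scan.ClairJob)(nil),
		job.ImageTransfer:  (*replication.Transfer)(nil),
		job.ImageDelete:    (*replication.Deleter)(nil),
		job.ImageReplicate: (*replication.Replicator)(nil),
	}); err != nil {
	// registration failed; report the error to the caller
}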

View File

@ -9,9 +9,9 @@ import (
"net/http"
"time"
"github.com/vmware/harbor/src/common/utils/log"
"github.com/vmware/harbor/src/jobservice_v2/config"
"github.com/vmware/harbor/src/jobservice_v2/env"
"github.com/vmware/harbor/src/jobservice_v2/logger"
)
//Server serves the http requests.
@ -91,7 +91,7 @@ func (s *Server) Start() {
var err error
defer func() {
s.context.WG.Done()
log.Infof("API server is gracefully shutdown")
logger.Infof("API server is gracefully shutdown")
}()
if s.config.Protocol == config.JobServiceProtocolHTTPS {
@ -110,13 +110,13 @@ func (s *Server) Start() {
func (s *Server) Stop() {
go func() {
defer func() {
log.Info("Stop API server done!")
logger.Info("Stop API server done!")
}()
shutDownCtx, cancel := context.WithTimeout(s.context.SystemContext, 10*time.Second)
defer cancel()
if err := s.httpServer.Shutdown(shutDownCtx); err != nil {
log.Errorf("Shutdown API server failed with error: %s\n", err)
logger.Errorf("Shutdown API server failed with error: %s\n", err)
}
}()
}

View File

@ -5,7 +5,6 @@ import (
"strings"
"github.com/vmware/harbor/src/common/utils/log"
"github.com/vmware/harbor/src/jobservice_v2/logger"
)
//JobLogger is an implementation of logger.Interface.
@ -17,7 +16,7 @@ type JobLogger struct {
//New logger
//nil might be returned
func New(logPath string, level string) logger.Interface {
func New(logPath string, level string) *JobLogger {
f, err := os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return nil

View File

@ -0,0 +1,74 @@
package logger
import (
"os"
"github.com/vmware/harbor/src/common/utils/log"
)
//ServiceLogger is an implementation of logger.Interface.
//It is used to log info in workerpool components.
type ServiceLogger struct {
backendLogger *log.Logger
}
//NewServiceLogger creates a new logger for the job service
func NewServiceLogger(level string) *ServiceLogger {
logLevel := parseLevel(level)
backendLogger := log.New(os.Stdout, log.NewTextFormatter(), logLevel)
return &ServiceLogger{
backendLogger: backendLogger,
}
}
//Debug ...
func (sl *ServiceLogger) Debug(v ...interface{}) {
sl.backendLogger.Debug(v...)
}
//Debugf with format
func (sl *ServiceLogger) Debugf(format string, v ...interface{}) {
sl.backendLogger.Debugf(format, v...)
}
//Info ...
func (sl *ServiceLogger) Info(v ...interface{}) {
sl.backendLogger.Info(v...)
}
//Infof with format
func (sl *ServiceLogger) Infof(format string, v ...interface{}) {
sl.backendLogger.Infof(format, v...)
}
//Warning ...
func (sl *ServiceLogger) Warning(v ...interface{}) {
sl.backendLogger.Warning(v...)
}
//Warningf with format
func (sl *ServiceLogger) Warningf(format string, v ...interface{}) {
sl.backendLogger.Warningf(format, v...)
}
//Error ...
func (sl *ServiceLogger) Error(v ...interface{}) {
sl.backendLogger.Error(v...)
}
//Errorf with format
func (sl *ServiceLogger) Errorf(format string, v ...interface{}) {
sl.backendLogger.Errorf(format, v...)
}
//Fatal error
func (sl *ServiceLogger) Fatal(v ...interface{}) {
sl.backendLogger.Fatal(v...)
}
//Fatalf error
func (sl *ServiceLogger) Fatalf(format string, v ...interface{}) {
sl.backendLogger.Fatalf(format, v...)
}
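
A sketch of how the service logger gets used; it mirrors the main.go change later in this commit, where it is installed as the backend of the package-level logger functions (the ilogger import alias is assumed):

// Sketch: create the service logger and install it behind logger.Info/logger.Errorf/...
sLogger := ilogger.NewServiceLogger(config.GetLogLevel())
logger.SetLogger(sLogger)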

View File

@ -4,9 +4,9 @@ import (
"net/http"
common_http "github.com/vmware/harbor/src/common/http"
"github.com/vmware/harbor/src/common/utils/log"
"github.com/vmware/harbor/src/common/utils/registry/auth"
"github.com/vmware/harbor/src/jobservice_v2/env"
"github.com/vmware/harbor/src/jobservice_v2/logger"
)
// Deleter deletes repository or images on the destination registry
@ -14,7 +14,7 @@ type Deleter struct {
ctx env.JobContext
repository *repository
dstRegistry *registry
logger *log.Logger
logger logger.Interface
retry bool
}
@ -49,8 +49,7 @@ func (d *Deleter) run(ctx env.JobContext, params map[string]interface{}) error {
}
func (d *Deleter) init(ctx env.JobContext, params map[string]interface{}) error {
// TODO
d.logger = log.DefaultLogger()
d.logger = ctx.GetLogger()
d.ctx = ctx
if canceled(d.ctx) {

View File

@ -0,0 +1,25 @@
package replication
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestMaxFailsOfDeleter(t *testing.T) {
d := &Deleter{}
assert.Equal(t, uint(3), d.MaxFails())
}
func TestValidateOfDeleter(t *testing.T) {
d := &Deleter{}
require.Nil(t, d.Validate(nil))
}
func TestShouldRetryOfDeleter(t *testing.T) {
d := &Deleter{}
assert.False(t, d.ShouldRetry())
d.retry = true
assert.True(t, d.ShouldRetry())
}

View File

@ -1,49 +1,36 @@
package replication
import (
"errors"
"fmt"
"net"
"net/http"
"os"
"strings"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/manifest/schema2"
common_http "github.com/vmware/harbor/src/common/http"
"github.com/vmware/harbor/src/common/http/modifier"
"github.com/vmware/harbor/src/common/models"
"github.com/vmware/harbor/src/common/utils"
"github.com/vmware/harbor/src/common/utils/log"
reg "github.com/vmware/harbor/src/common/utils/registry"
"github.com/vmware/harbor/src/common/utils/registry/auth"
"github.com/vmware/harbor/src/jobservice_v2/env"
job_utils "github.com/vmware/harbor/src/jobservice_v2/job/impl/utils"
"github.com/vmware/harbor/src/jobservice_v2/logger"
)
var (
errCanceled = errors.New("the job is canceled")
)
// Replicator replicates images from source registry to the destination one
// Replicator calls the UI's API to start a replication according to the policy ID
// passed in the parameters
type Replicator struct {
ctx env.JobContext
repository *repository
srcRegistry *registry
dstRegistry *registry
logger *log.Logger
retry bool
ctx env.JobContext
url string // the URL of UI service
insecure bool
policyID int64
client *common_http.Client
logger logger.Interface
}
// ShouldRetry : retry if the error is network error
// ShouldRetry ...
func (r *Replicator) ShouldRetry() bool {
return r.retry
return false
}
// MaxFails ...
func (r *Replicator) MaxFails() uint {
return 3
return 0
}
// Validate ....
@ -53,296 +40,48 @@ func (r *Replicator) Validate(params map[string]interface{}) error {
// Run ...
func (r *Replicator) Run(ctx env.JobContext, params map[string]interface{}) error {
err := r.run(ctx, params)
r.retry = retry(err)
return err
}
func (r *Replicator) run(ctx env.JobContext, params map[string]interface{}) error {
// initialize
if err := r.init(ctx, params); err != nil {
return err
}
// try to create project on destination registry
if err := r.createProject(); err != nil {
return err
}
// replicate the images
for _, tag := range r.repository.tags {
digest, manifest, err := r.pullManifest(tag)
if err != nil {
return err
}
if err := r.transferLayers(tag, manifest.References()); err != nil {
return err
}
if err := r.pushManifest(tag, digest, manifest); err != nil {
return err
}
}
return nil
return r.replicate()
}
func (r *Replicator) init(ctx env.JobContext, params map[string]interface{}) error {
// TODO
r.logger = log.DefaultLogger()
r.logger = ctx.GetLogger()
r.ctx = ctx
if canceled(r.ctx) {
r.logger.Warning(errCanceled.Error())
return errCanceled
}
// init images that need to be replicated
r.repository = &repository{
name: params["repository"].(string),
}
if tags, ok := params["tags"]; ok {
tgs := tags.([]interface{})
for _, tg := range tgs {
r.repository.tags = append(r.repository.tags, tg.(string))
}
}
var err error
// init source registry client
srcURL := params["src_registry_url"].(string)
srcInsecure := params["src_registry_insecure"].(bool)
srcCred := auth.NewCookieCredential(&http.Cookie{
r.policyID = (int64)(params["policy_id"].(float64))
r.url = params["url"].(string)
r.insecure = params["insecure"].(bool)
cred := auth.NewCookieCredential(&http.Cookie{
Name: models.UISecretCookie,
Value: os.Getenv("JOBSERVICE_SECRET"),
Value: secret(),
})
srcTokenServiceURL := ""
if stsu, ok := params["src_token_service_url"]; ok {
srcTokenServiceURL = stsu.(string)
}
if len(srcTokenServiceURL) > 0 {
r.srcRegistry, err = initRegistry(srcURL, srcInsecure, srcCred, r.repository.name, srcTokenServiceURL)
} else {
r.srcRegistry, err = initRegistry(srcURL, srcInsecure, srcCred, r.repository.name)
}
if err != nil {
r.logger.Errorf("failed to create client for source registry: %v", err)
return err
}
r.client = common_http.NewClient(&http.Client{
Transport: reg.GetHTTPTransport(r.insecure),
}, cred)
// init destination registry client
dstURL := params["dst_registry_url"].(string)
dstInsecure := params["dst_registry_insecure"].(bool)
dstCred := auth.NewBasicAuthCredential(
params["dst_registry_username"].(string),
params["dst_registry_password"].(string))
r.dstRegistry, err = initRegistry(dstURL, dstInsecure, dstCred, r.repository.name)
if err != nil {
r.logger.Errorf("failed to create client for destination registry: %v", err)
return err
}
// get the tag list first if it is null
if len(r.repository.tags) == 0 {
tags, err := r.srcRegistry.ListTag()
if err != nil {
r.logger.Errorf("an error occurred while listing tags for the source repository: %v", err)
return err
}
if len(tags) == 0 {
err = fmt.Errorf("empty tag list for repository %s", r.repository.name)
r.logger.Error(err)
return err
}
r.repository.tags = tags
}
r.logger.Infof("initialization completed: repository: %s, tags: %v, source registry: URL-%s insecure-%v, destination registry: URL-%s insecure-%v",
r.repository.name, r.repository.tags, r.srcRegistry.url, r.srcRegistry.insecure, r.dstRegistry.url, r.dstRegistry.insecure)
r.logger.Infof("initialization completed: policy ID: %d, URL: %s, insecure: %v",
r.policyID, r.url, r.insecure)
return nil
}
func initRegistry(url string, insecure bool, credential modifier.Modifier,
repository string, tokenServiceURL ...string) (*registry, error) {
registry := &registry{
url: url,
insecure: insecure,
}
// use the same transport for clients connecting to docker registry and Harbor UI
transport := reg.GetHTTPTransport(insecure)
authorizer := auth.NewStandardTokenAuthorizer(&http.Client{
Transport: transport,
}, credential, tokenServiceURL...)
uam := &job_utils.UserAgentModifier{
UserAgent: "harbor-registry-client",
}
repositoryClient, err := reg.NewRepository(repository, url,
&http.Client{
Transport: reg.NewTransport(transport, authorizer, uam),
})
if err != nil {
return nil, err
}
registry.Repository = *repositoryClient
registry.client = common_http.NewClient(
&http.Client{
Transport: transport,
}, credential)
return registry, nil
}
func (r *Replicator) createProject() error {
if canceled(r.ctx) {
r.logger.Warning(errCanceled.Error())
return errCanceled
}
p, _ := utils.ParseRepository(r.repository.name)
project, err := r.srcRegistry.GetProject(p)
if err != nil {
r.logger.Errorf("failed to get project %s from source registry: %v", p, err)
func (r *Replicator) replicate() error {
if err := r.client.Post(fmt.Sprintf("%s/api/replications", r.url), struct {
PolicyID int64 `json:"policy_id"`
}{
PolicyID: r.policyID,
}); err != nil {
r.logger.Errorf("failed to send the replication request to %s: %v", r.url, err)
return err
}
if err = r.dstRegistry.CreateProject(project); err != nil {
// other jobs may also be doing the same thing while the current job
// is creating the project, or the project may already exist, so when the
// response code is 409, continue to the next step
if e, ok := err.(*common_http.Error); ok && e.Code == http.StatusConflict {
r.logger.Warningf("the status code is 409 when creating project %s on destination registry, try to do next step", p)
return nil
}
r.logger.Errorf("an error occurred while creating project %s on destination registry: %v", p, err)
return err
}
r.logger.Infof("project %s is created on destination registry", p)
r.logger.Infof("the replication request has been sent to %s successfully", r.url)
return nil
}
func (r *Replicator) pullManifest(tag string) (string, distribution.Manifest, error) {
if canceled(r.ctx) {
r.logger.Warning(errCanceled.Error())
return "", nil, errCanceled
}
acceptMediaTypes := []string{schema1.MediaTypeManifest, schema2.MediaTypeManifest}
digest, mediaType, payload, err := r.srcRegistry.PullManifest(tag, acceptMediaTypes)
if err != nil {
r.logger.Errorf("an error occurred while pulling manifest of %s:%s from source registry: %v",
r.repository.name, tag, err)
return "", nil, err
}
r.logger.Infof("manifest of %s:%s pulled successfully from source registry: %s",
r.repository.name, tag, digest)
if strings.Contains(mediaType, "application/json") {
mediaType = schema1.MediaTypeManifest
}
manifest, _, err := reg.UnMarshal(mediaType, payload)
if err != nil {
r.logger.Errorf("an error occurred while parsing manifest: %v", err)
return "", nil, err
}
return digest, manifest, nil
}
func (r *Replicator) transferLayers(tag string, blobs []distribution.Descriptor) error {
repository := r.repository.name
// all blobs(layers and config)
for _, blob := range blobs {
if canceled(r.ctx) {
r.logger.Warning(errCanceled.Error())
return errCanceled
}
digest := blob.Digest.String()
exist, err := r.dstRegistry.BlobExist(digest)
if err != nil {
r.logger.Errorf("an error occurred while checking existence of blob %s of %s:%s on destination registry: %v",
digest, repository, tag, err)
return err
}
if exist {
r.logger.Infof("blob %s of %s:%s already exists on the destination registry, skip",
digest, repository, tag)
continue
}
r.logger.Infof("transferring blob %s of %s:%s to the destination registry ...",
digest, repository, tag)
size, data, err := r.srcRegistry.PullBlob(digest)
if err != nil {
r.logger.Errorf("an error occurred while pulling blob %s of %s:%s from the source registry: %v",
digest, repository, tag, err)
return err
}
if data != nil {
defer data.Close()
}
if err = r.dstRegistry.PushBlob(digest, size, data); err != nil {
r.logger.Errorf("an error occurred while pushing blob %s of %s:%s to the distination registry: %v",
digest, repository, tag, err)
return err
}
r.logger.Infof("blob %s of %s:%s transferred to the destination registry completed",
digest, repository, tag)
}
return nil
}
func (r *Replicator) pushManifest(tag, digest string, manifest distribution.Manifest) error {
if canceled(r.ctx) {
r.logger.Warning(errCanceled.Error())
return errCanceled
}
repository := r.repository.name
_, exist, err := r.dstRegistry.ManifestExist(digest)
if err != nil {
r.logger.Warningf("an error occurred while checking the existence of manifest of %s:%s on the destination registry: %v, try to push manifest",
repository, tag, err)
} else {
if exist {
r.logger.Infof("manifest of %s:%s exists on the destination registry, skip manifest pushing",
repository, tag)
return nil
}
}
mediaType, data, err := manifest.Payload()
if err != nil {
r.logger.Errorf("an error occurred while getting payload of manifest for %s:%s : %v",
repository, tag, err)
return err
}
if _, err = r.dstRegistry.PushManifest(tag, mediaType, data); err != nil {
r.logger.Errorf("an error occurred while pushing manifest of %s:%s to the destination registry: %v",
repository, tag, err)
return err
}
r.logger.Infof("manifest of %s:%s has been pushed to the destination registry",
repository, tag)
return nil
}
func canceled(ctx env.JobContext) bool {
_, canceled := ctx.OPCommand()
return canceled
}
func retry(err error) bool {
if err == nil {
return false
}
_, ok := err.(net.Error)
return ok
}
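
For illustration, the reworked Replicator only needs the policy ID, the UI URL, and the insecure flag; a sketch of the params map read by init, with hypothetical values:

// Sketch: parameters consumed by Replicator.init; the values are examples only.
params := map[string]interface{}{
	"policy_id": float64(1), // arrives as float64 after JSON decoding
	"url":       "http://ui:8080",
	"insecure":  true,
}
err := (&Replicator{}).Run(ctx, params) // ctx is an env.JobContext provided by the worker pool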

View File

@ -7,19 +7,17 @@ import (
"github.com/stretchr/testify/require"
)
func TestMaxFails(t *testing.T) {
func TestMaxFailsOfReplicator(t *testing.T) {
r := &Replicator{}
assert.Equal(t, uint(3), r.MaxFails())
assert.Equal(t, uint(0), r.MaxFails())
}
func TestValidate(t *testing.T) {
func TestValidateOfReplicator(t *testing.T) {
r := &Replicator{}
require.Nil(t, r.Validate(nil))
}
func TestShouldRetry(t *testing.T) {
func TestShouldRetryOfReplicator(t *testing.T) {
r := &Replicator{}
assert.False(t, r.retry)
r.retry = true
assert.True(t, r.retry)
assert.False(t, r.ShouldRetry())
}

View File

@ -0,0 +1,351 @@
package replication
import (
"errors"
"fmt"
"net"
"net/http"
"os"
"strings"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/manifest/schema2"
common_http "github.com/vmware/harbor/src/common/http"
"github.com/vmware/harbor/src/common/http/modifier"
"github.com/vmware/harbor/src/common/models"
"github.com/vmware/harbor/src/common/utils"
reg "github.com/vmware/harbor/src/common/utils/registry"
"github.com/vmware/harbor/src/common/utils/registry/auth"
"github.com/vmware/harbor/src/jobservice_v2/env"
job_utils "github.com/vmware/harbor/src/jobservice_v2/job/impl/utils"
"github.com/vmware/harbor/src/jobservice_v2/logger"
)
var (
errCanceled = errors.New("the job is canceled")
)
// Transfer transfers images from the source registry to the destination one
type Transfer struct {
ctx env.JobContext
repository *repository
srcRegistry *registry
dstRegistry *registry
logger logger.Interface
retry bool
}
// ShouldRetry : retry if the error is network error
func (t *Transfer) ShouldRetry() bool {
return t.retry
}
// MaxFails ...
func (t *Transfer) MaxFails() uint {
return 3
}
// Validate ....
func (t *Transfer) Validate(params map[string]interface{}) error {
return nil
}
// Run ...
func (t *Transfer) Run(ctx env.JobContext, params map[string]interface{}) error {
err := t.run(ctx, params)
t.retry = retry(err)
return err
}
func (t *Transfer) run(ctx env.JobContext, params map[string]interface{}) error {
// initialize
if err := t.init(ctx, params); err != nil {
return err
}
// try to create project on destination registry
if err := t.createProject(); err != nil {
return err
}
// replicate the images
for _, tag := range t.repository.tags {
digest, manifest, err := t.pullManifest(tag)
if err != nil {
return err
}
if err := t.transferLayers(tag, manifest.References()); err != nil {
return err
}
if err := t.pushManifest(tag, digest, manifest); err != nil {
return err
}
}
return nil
}
func (t *Transfer) init(ctx env.JobContext, params map[string]interface{}) error {
t.logger = ctx.GetLogger()
t.ctx = ctx
if canceled(t.ctx) {
t.logger.Warning(errCanceled.Error())
return errCanceled
}
// init images that need to be replicated
t.repository = &repository{
name: params["repository"].(string),
}
if tags, ok := params["tags"]; ok {
tgs := tags.([]interface{})
for _, tg := range tgs {
t.repository.tags = append(t.repository.tags, tg.(string))
}
}
var err error
// init source registry client
srcURL := params["src_registry_url"].(string)
srcInsecure := params["src_registry_insecure"].(bool)
srcCred := auth.NewCookieCredential(&http.Cookie{
Name: models.UISecretCookie,
Value: secret(),
})
srcTokenServiceURL := ""
if stsu, ok := params["src_token_service_url"]; ok {
srcTokenServiceURL = stsu.(string)
}
if len(srcTokenServiceURL) > 0 {
t.srcRegistry, err = initRegistry(srcURL, srcInsecure, srcCred, t.repository.name, srcTokenServiceURL)
} else {
t.srcRegistry, err = initRegistry(srcURL, srcInsecure, srcCred, t.repository.name)
}
if err != nil {
t.logger.Errorf("failed to create client for source registry: %v", err)
return err
}
// init destination registry client
dstURL := params["dst_registry_url"].(string)
dstInsecure := params["dst_registry_insecure"].(bool)
dstCred := auth.NewBasicAuthCredential(
params["dst_registry_username"].(string),
params["dst_registry_password"].(string))
t.dstRegistry, err = initRegistry(dstURL, dstInsecure, dstCred, t.repository.name)
if err != nil {
t.logger.Errorf("failed to create client for destination registry: %v", err)
return err
}
// get the tag list first if it is null
if len(t.repository.tags) == 0 {
tags, err := t.srcRegistry.ListTag()
if err != nil {
t.logger.Errorf("an error occurred while listing tags for the source repository: %v", err)
return err
}
if len(tags) == 0 {
err = fmt.Errorf("empty tag list for repository %s", t.repository.name)
t.logger.Error(err)
return err
}
t.repository.tags = tags
}
t.logger.Infof("initialization completed: repository: %s, tags: %v, source registry: URL-%s insecure-%v, destination registry: URL-%s insecure-%v",
t.repository.name, t.repository.tags, t.srcRegistry.url, t.srcRegistry.insecure, t.dstRegistry.url, t.dstRegistry.insecure)
return nil
}
func initRegistry(url string, insecure bool, credential modifier.Modifier,
repository string, tokenServiceURL ...string) (*registry, error) {
registry := &registry{
url: url,
insecure: insecure,
}
// use the same transport for clients connecting to docker registry and Harbor UI
transport := reg.GetHTTPTransport(insecure)
authorizer := auth.NewStandardTokenAuthorizer(&http.Client{
Transport: transport,
}, credential, tokenServiceURL...)
uam := &job_utils.UserAgentModifier{
UserAgent: "harbor-registry-client",
}
repositoryClient, err := reg.NewRepository(repository, url,
&http.Client{
Transport: reg.NewTransport(transport, authorizer, uam),
})
if err != nil {
return nil, err
}
registry.Repository = *repositoryClient
registry.client = common_http.NewClient(
&http.Client{
Transport: transport,
}, credential)
return registry, nil
}
func (t *Transfer) createProject() error {
if canceled(t.ctx) {
t.logger.Warning(errCanceled.Error())
return errCanceled
}
p, _ := utils.ParseRepository(t.repository.name)
project, err := t.srcRegistry.GetProject(p)
if err != nil {
t.logger.Errorf("failed to get project %s from source registry: %v", p, err)
return err
}
if err = t.dstRegistry.CreateProject(project); err != nil {
// other jobs may also be doing the same thing while the current job
// is creating the project, or the project may already exist, so when the
// response code is 409, continue to the next step
if e, ok := err.(*common_http.Error); ok && e.Code == http.StatusConflict {
t.logger.Warningf("the status code is 409 when creating project %s on destination registry, try to do next step", p)
return nil
}
t.logger.Errorf("an error occurred while creating project %s on destination registry: %v", p, err)
return err
}
t.logger.Infof("project %s is created on destination registry", p)
return nil
}
func (t *Transfer) pullManifest(tag string) (string, distribution.Manifest, error) {
if canceled(t.ctx) {
t.logger.Warning(errCanceled.Error())
return "", nil, errCanceled
}
acceptMediaTypes := []string{schema1.MediaTypeManifest, schema2.MediaTypeManifest}
digest, mediaType, payload, err := t.srcRegistry.PullManifest(tag, acceptMediaTypes)
if err != nil {
t.logger.Errorf("an error occurred while pulling manifest of %s:%s from source registry: %v",
t.repository.name, tag, err)
return "", nil, err
}
t.logger.Infof("manifest of %s:%s pulled successfully from source registry: %s",
t.repository.name, tag, digest)
if strings.Contains(mediaType, "application/json") {
mediaType = schema1.MediaTypeManifest
}
manifest, _, err := reg.UnMarshal(mediaType, payload)
if err != nil {
t.logger.Errorf("an error occurred while parsing manifest: %v", err)
return "", nil, err
}
return digest, manifest, nil
}
func (t *Transfer) transferLayers(tag string, blobs []distribution.Descriptor) error {
repository := t.repository.name
// all blobs(layers and config)
for _, blob := range blobs {
if canceled(t.ctx) {
t.logger.Warning(errCanceled.Error())
return errCanceled
}
digest := blob.Digest.String()
exist, err := t.dstRegistry.BlobExist(digest)
if err != nil {
t.logger.Errorf("an error occurred while checking existence of blob %s of %s:%s on destination registry: %v",
digest, repository, tag, err)
return err
}
if exist {
t.logger.Infof("blob %s of %s:%s already exists on the destination registry, skip",
digest, repository, tag)
continue
}
t.logger.Infof("transferring blob %s of %s:%s to the destination registry ...",
digest, repository, tag)
size, data, err := t.srcRegistry.PullBlob(digest)
if err != nil {
t.logger.Errorf("an error occurred while pulling blob %s of %s:%s from the source registry: %v",
digest, repository, tag, err)
return err
}
if data != nil {
defer data.Close()
}
if err = t.dstRegistry.PushBlob(digest, size, data); err != nil {
t.logger.Errorf("an error occurred while pushing blob %s of %s:%s to the distination registry: %v",
digest, repository, tag, err)
return err
}
t.logger.Infof("blob %s of %s:%s transferred to the destination registry completed",
digest, repository, tag)
}
return nil
}
func (t *Transfer) pushManifest(tag, digest string, manifest distribution.Manifest) error {
if canceled(t.ctx) {
t.logger.Warning(errCanceled.Error())
return errCanceled
}
repository := t.repository.name
_, exist, err := t.dstRegistry.ManifestExist(digest)
if err != nil {
t.logger.Warningf("an error occurred while checking the existence of manifest of %s:%s on the destination registry: %v, try to push manifest",
repository, tag, err)
} else {
if exist {
t.logger.Infof("manifest of %s:%s exists on the destination registry, skip manifest pushing",
repository, tag)
return nil
}
}
mediaType, data, err := manifest.Payload()
if err != nil {
t.logger.Errorf("an error occurred while getting payload of manifest for %s:%s : %v",
repository, tag, err)
return err
}
if _, err = t.dstRegistry.PushManifest(tag, mediaType, data); err != nil {
t.logger.Errorf("an error occurred while pushing manifest of %s:%s to the destination registry: %v",
repository, tag, err)
return err
}
t.logger.Infof("manifest of %s:%s has been pushed to the destination registry",
repository, tag)
return nil
}
func canceled(ctx env.JobContext) bool {
_, canceled := ctx.OPCommand()
return canceled
}
func retry(err error) bool {
if err == nil {
return false
}
_, ok := err.(net.Error)
return ok
}
func secret() string {
return os.Getenv("JOBSERVICE_SECRET")
}
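
Similarly, the Transfer job reads all registry and repository information from the params map in init; a sketch with hypothetical values (the URLs and credentials are illustrative only):

// Sketch: parameters consumed by Transfer.init; the values are examples only.
params := map[string]interface{}{
	"repository":            "library/hello-world",
	"tags":                  []interface{}{"latest"},
	"src_registry_url":      "http://registry:5000",
	"src_registry_insecure": true,
	"src_token_service_url": "http://ui:8080/service/token", // optional
	"dst_registry_url":      "https://remote.example.com",
	"dst_registry_insecure": false,
	"dst_registry_username": "admin",
	"dst_registry_password": "changeit",
}
err := (&Transfer{}).Run(ctx, params) // ctx is an env.JobContext provided by the worker pool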

View File

@ -0,0 +1,25 @@
package replication
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestMaxFailsOfTransfer(t *testing.T) {
r := &Transfer{}
assert.Equal(t, uint(3), r.MaxFails())
}
func TestValidateOfTransfer(t *testing.T) {
r := &Transfer{}
require.Nil(t, r.Validate(nil))
}
func TestShouldRetryOfTransfer(t *testing.T) {
r := &Transfer{}
assert.False(t, r.ShouldRetry())
r.retry = true
assert.True(t, r.ShouldRetry())
}

View File

@ -0,0 +1,115 @@
// Copyright 2018 The Harbor Authors. All rights reserved.
package logger
import (
"log"
)
//sLogger is used to log for workerpool itself
var sLogger Interface
//SetLogger sets the logger implementation
func SetLogger(logger Interface) {
sLogger = logger
}
//Debug ...
func Debug(v ...interface{}) {
if sLogger != nil {
sLogger.Debug(v...)
return
}
log.Println(v...)
}
//Debugf for debugging with format
func Debugf(format string, v ...interface{}) {
if sLogger != nil {
sLogger.Debugf(format, v...)
return
}
log.Printf(format, v...)
}
//Info ...
func Info(v ...interface{}) {
if sLogger != nil {
sLogger.Info(v...)
return
}
log.Println(v...)
}
//Infof for logging info with format
func Infof(format string, v ...interface{}) {
if sLogger != nil {
sLogger.Infof(format, v...)
return
}
log.Printf(format, v...)
}
//Warning ...
func Warning(v ...interface{}) {
if sLogger != nil {
sLogger.Warning(v...)
return
}
log.Println(v...)
}
//Warningf for warning with format
func Warningf(format string, v ...interface{}) {
if sLogger != nil {
sLogger.Warningf(format, v...)
return
}
log.Printf(format, v...)
}
//Error for logging error
func Error(v ...interface{}) {
if sLogger != nil {
sLogger.Error(v...)
return
}
log.Println(v...)
}
//Errorf for logging error with format
func Errorf(format string, v ...interface{}) {
if sLogger != nil {
sLogger.Errorf(format, v...)
return
}
log.Printf(format, v...)
}
//Fatal ...
func Fatal(v ...interface{}) {
if sLogger != nil {
sLogger.Fatal(v...)
return
}
log.Fatal(v...)
}
//Fatalf for fatal error with format
func Fatalf(format string, v ...interface{}) {
if sLogger != nil {
sLogger.Fatalf(format, v...)
return
}
log.Fatalf(format, v...)
}
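
Until SetLogger is called, these package-level functions fall back to the standard library log package, so early log lines are not lost; a small sketch (the "INFO" level is an assumed value):

// Sketch: package-level logging before and after a backend is installed.
logger.Info("goes to the standard log package; no backend set yet")
logger.SetLogger(ilogger.NewServiceLogger("INFO"))
logger.Infof("goes to the ServiceLogger backend: %s", "example")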

View File

@ -8,8 +8,6 @@ import (
"io/ioutil"
"os"
"time"
"github.com/vmware/harbor/src/common/utils/log"
)
const (
@ -31,7 +29,7 @@ func NewSweeper(ctx context.Context, workDir string, period uint) *Sweeper {
//Start to work
func (s *Sweeper) Start() {
go s.loop()
log.Info("Logger sweeper is started")
Info("Logger sweeper is started")
}
func (s *Sweeper) loop() {
@ -41,7 +39,7 @@ func (s *Sweeper) loop() {
}
defer func() {
log.Info("Logger sweeper is stopped")
Info("Logger sweeper is stopped")
}()
//First run
@ -66,14 +64,14 @@ func (s *Sweeper) clear() {
count = &cleared
)
log.Info("Start to clear the job outdated log files")
Info("Start to clear the job outdated log files")
defer func() {
log.Infof("%d job outdated log files cleared", *count)
Infof("%d job outdated log files cleared", *count)
}()
logFiles, err := ioutil.ReadDir(s.workDir)
if err != nil {
log.Errorf("Failed to get the outdated log files under '%s' with error: %s\n", s.workDir, err)
Errorf("Failed to get the outdated log files under '%s' with error: %s\n", s.workDir, err)
return
}
if len(logFiles) == 0 {
@ -86,7 +84,7 @@ func (s *Sweeper) clear() {
if err := os.Remove(logFilePath); err == nil {
cleared++
} else {
log.Warningf("Failed to remove log file '%s'\n", logFilePath)
Warningf("Failed to remove log file '%s'\n", logFilePath)
}
}
}

View File

@ -9,6 +9,8 @@ import (
"github.com/vmware/harbor/src/jobservice_v2/config"
"github.com/vmware/harbor/src/jobservice_v2/env"
"github.com/vmware/harbor/src/jobservice_v2/job/impl"
ilogger "github.com/vmware/harbor/src/jobservice_v2/job/impl/logger"
"github.com/vmware/harbor/src/jobservice_v2/logger"
"github.com/vmware/harbor/src/jobservice_v2/runtime"
"github.com/vmware/harbor/src/jobservice_v2/utils"
)
@ -25,6 +27,12 @@ func main() {
return
}
//Load configurations
if err := config.DefaultConfig.Load(*configPath, true); err != nil {
fmt.Printf("Failed to load configurations with error: %s\n", err)
return
}
//Set job context initializer
runtime.JobService.SetJobContextInitializer(func(ctx *env.Context) (env.JobContext, error) {
secret := config.GetAuthSecret()
@ -42,6 +50,10 @@ func main() {
return jobCtx, nil
})
//New logger for job service
sLogger := ilogger.NewServiceLogger(config.GetLogLevel())
logger.SetLogger(sLogger)
//Start
runtime.JobService.LoadAndRun(*configPath, true)
runtime.JobService.LoadAndRun()
}

View File

@ -13,6 +13,7 @@ import (
"time"
"github.com/vmware/harbor/src/jobservice_v2/errs"
"github.com/vmware/harbor/src/jobservice_v2/logger"
"github.com/vmware/harbor/src/jobservice_v2/period"
@ -21,7 +22,6 @@ import (
"github.com/gocraft/work"
"github.com/garyburd/redigo/redis"
"github.com/vmware/harbor/src/common/utils/log"
"github.com/vmware/harbor/src/jobservice_v2/job"
"github.com/vmware/harbor/src/jobservice_v2/models"
"github.com/vmware/harbor/src/jobservice_v2/utils"
@ -94,7 +94,7 @@ func (rjs *RedisJobStatsManager) Start() {
go rjs.loop()
rjs.isRunning = true
log.Info("Redis job stats manager is started")
logger.Info("Redis job stats manager is started")
}
//Shutdown is implementation of same method in JobStatsManager interface.
@ -152,7 +152,7 @@ func (rjs *RedisJobStatsManager) loop() {
rjs.isRunning = false
//Notify other sub goroutines
close(controlChan)
log.Info("Redis job stats manager is stopped")
logger.Info("Redis job stats manager is stopped")
}()
for {
@ -176,7 +176,7 @@ func (rjs *RedisJobStatsManager) loop() {
}
}()
} else {
log.Warningf("Failed to process '%s' request with error: %s (%d times tried)\n", item.op, err, maxFails)
logger.Warningf("Failed to process '%s' request with error: %s (%d times tried)\n", item.op, err, maxFails)
if item.op == opReportStatus {
clearHookCache = true
}
@ -238,7 +238,7 @@ func (rjs *RedisJobStatsManager) Stop(jobID string) error {
//thirdly expire the job stats of this periodic job if exists
if err := rjs.expirePeriodicJobStats(theJob.Stats.JobID); err != nil {
//only logged
log.Errorf("Expire the stats of job %s failed with error: %s\n", theJob.Stats.JobID, err)
logger.Errorf("Expire the stats of job %s failed with error: %s\n", theJob.Stats.JobID, err)
}
default:
break
@ -378,7 +378,7 @@ func (rjs *RedisJobStatsManager) submitStatusReportingItem(jobID string, status,
hookURL, err = rjs.getHook(jobID)
if err != nil {
//logged and exit
log.Warningf("no status hook found for job %s\n, abandon status reporting", jobID)
logger.Warningf("no status hook found for job %s\n, abandon status reporting", jobID)
return
}
}
@ -418,7 +418,7 @@ func (rjs *RedisJobStatsManager) expirePeriodicJobStats(jobID string) error {
func (rjs *RedisJobStatsManager) deleteScheduledJobsOfPeriodicPolicy(policyID string, cronSpec string) error {
schedule, err := cron.Parse(cronSpec)
if err != nil {
log.Errorf("cron spec '%s' is not valid", cronSpec)
logger.Errorf("cron spec '%s' is not valid", cronSpec)
return err
}
@ -432,7 +432,7 @@ func (rjs *RedisJobStatsManager) deleteScheduledJobsOfPeriodicPolicy(policyID st
epoch := t.Unix()
if err = rjs.client.DeleteScheduledJob(epoch, policyID); err != nil {
//only logged
log.Warningf("delete scheduled instance for periodic job %s failed with error: %s\n", policyID, err)
logger.Warningf("delete scheduled instance for periodic job %s failed with error: %s\n", policyID, err)
}
}
@ -453,7 +453,7 @@ func (rjs *RedisJobStatsManager) getCrlCommand(jobID string) (string, error) {
_, err = conn.Do("DEL", key)
if err != nil {
//only logged
log.Errorf("del key %s failed with error: %s\n", key, err)
logger.Errorf("del key %s failed with error: %s\n", key, err)
}
return cmd, nil

View File

@ -9,8 +9,8 @@ import (
"github.com/garyburd/redigo/redis"
"github.com/gocraft/work"
"github.com/robfig/cron"
"github.com/vmware/harbor/src/common/utils/log"
"github.com/vmware/harbor/src/jobservice_v2/job"
"github.com/vmware/harbor/src/jobservice_v2/logger"
"github.com/vmware/harbor/src/jobservice_v2/utils"
)
@ -52,7 +52,7 @@ func newPeriodicEnqueuer(namespace string, pool *redis.Pool, policyStore *period
func (pe *periodicEnqueuer) start() {
go pe.loop()
log.Info("Periodic enqueuer is started")
logger.Info("Periodic enqueuer is started")
}
func (pe *periodicEnqueuer) stop() {
@ -62,7 +62,7 @@ func (pe *periodicEnqueuer) stop() {
func (pe *periodicEnqueuer) loop() {
defer func() {
log.Info("Periodic enqueuer is stopped")
logger.Info("Periodic enqueuer is stopped")
}()
// Begin reaping periodically
timer := time.NewTimer(periodicEnqueuerSleep + time.Duration(rand.Intn(30))*time.Second)
@ -71,7 +71,7 @@ func (pe *periodicEnqueuer) loop() {
if pe.shouldEnqueue() {
err := pe.enqueue()
if err != nil {
log.Errorf("periodic_enqueuer.loop.enqueue:%s\n", err)
logger.Errorf("periodic_enqueuer.loop.enqueue:%s\n", err)
}
}
@ -85,7 +85,7 @@ func (pe *periodicEnqueuer) loop() {
if pe.shouldEnqueue() {
err := pe.enqueue()
if err != nil {
log.Errorf("periodic_enqueuer.loop.enqueue:%s\n", err)
logger.Errorf("periodic_enqueuer.loop.enqueue:%s\n", err)
}
}
}
@ -133,7 +133,7 @@ func (pe *periodicEnqueuer) enqueue() error {
return err
}
log.Infof("Schedule job %s for policy %s at %d\n", pj.jobName, pl.PolicyID, epoch)
logger.Infof("Schedule job %s for policy %s at %d\n", pj.jobName, pl.PolicyID, epoch)
}
//Directly use redis conn to update the periodic job (policy) status
//Do not care about the result
@ -153,7 +153,7 @@ func (pe *periodicEnqueuer) shouldEnqueue() bool {
if err == redis.ErrNil {
return true
} else if err != nil {
log.Errorf("periodic_enqueuer.should_enqueue:%s\n", err)
logger.Errorf("periodic_enqueuer.should_enqueue:%s\n", err)
return true
}

View File

@ -12,8 +12,8 @@ import (
"github.com/robfig/cron"
"github.com/garyburd/redigo/redis"
"github.com/vmware/harbor/src/common/utils/log"
"github.com/vmware/harbor/src/jobservice_v2/env"
"github.com/vmware/harbor/src/jobservice_v2/logger"
"github.com/vmware/harbor/src/jobservice_v2/models"
"github.com/vmware/harbor/src/jobservice_v2/utils"
)
@ -54,7 +54,7 @@ func NewRedisPeriodicScheduler(ctx *env.Context, namespace string, redisPool *re
//Start to serve
func (rps *RedisPeriodicScheduler) Start() {
defer func() {
log.Info("Redis scheduler is stopped")
logger.Info("Redis scheduler is stopped")
}()
//Load existing periodic job policies
@ -67,7 +67,7 @@ func (rps *RedisPeriodicScheduler) Start() {
//start enqueuer
rps.enqueuer.start()
defer rps.enqueuer.stop()
log.Info("Redis scheduler is started")
logger.Info("Redis scheduler is started")
//blocking here
<-rps.context.SystemContext.Done()
@ -234,14 +234,14 @@ func (rps *RedisPeriodicScheduler) Load() error {
if err := policy.DeSerialize(rawPolicy); err != nil {
//Ignore error which means the policy data is not valid
//Only logged
log.Warningf("failed to deserialize periodic policy with error:%s; raw data: %s\n", err, rawPolicy)
logger.Warningf("failed to deserialize periodic policy with error:%s; raw data: %s\n", err, rawPolicy)
continue
}
score, err := strconv.ParseInt(string(rawScore), 10, 64)
if err != nil {
//Ignore error which means the policy data is not valid
//Only logged
log.Warningf("failed to parse the score of the periodic policy with error:%s\n", err)
logger.Warningf("failed to parse the score of the periodic policy with error:%s\n", err)
continue
}
@ -261,7 +261,7 @@ func (rps *RedisPeriodicScheduler) Load() error {
rps.pstore.addAll(allPeriodicPolicies)
}
log.Infof("Load %d periodic job policies", len(allPeriodicPolicies))
logger.Infof("Load %d periodic job policies", len(allPeriodicPolicies))
return nil
}

View File

@ -9,7 +9,7 @@ import (
"github.com/gocraft/work"
"github.com/garyburd/redigo/redis"
"github.com/vmware/harbor/src/common/utils/log"
"github.com/vmware/harbor/src/jobservice_v2/logger"
"github.com/vmware/harbor/src/jobservice_v2/utils"
)
@ -49,7 +49,7 @@ func (s *Sweeper) ClearOutdatedScheduledJobs() error {
if r == nil {
//Action is already locked by other workerpool
log.Info("Ignore clear outdated scheduled jobs")
logger.Info("Ignore clear outdated scheduled jobs")
return nil
}
@ -71,7 +71,7 @@ func (s *Sweeper) ClearOutdatedScheduledJobs() error {
allErrors = append(allErrors, err)
}
log.Infof("Clear outdated scheduled job: %s run at %#v\n", j.ID, time.Unix(jobScore.Score, 0).String())
logger.Infof("Clear outdated scheduled job: %s run at %#v\n", j.ID, time.Unix(jobScore.Score, 0).String())
}
//Unlock

View File

@ -9,11 +9,11 @@ import (
"reflect"
"time"
"github.com/vmware/harbor/src/jobservice_v2/logger"
"github.com/vmware/harbor/src/jobservice_v2/opm"
"github.com/vmware/harbor/src/jobservice_v2/period"
"github.com/garyburd/redigo/redis"
"github.com/vmware/harbor/src/common/utils/log"
"github.com/vmware/harbor/src/jobservice_v2/models"
"github.com/vmware/harbor/src/jobservice_v2/utils"
)
@ -39,7 +39,7 @@ func NewMessageServer(ctx context.Context, namespace string, redisPool *redis.Po
//Start to serve
func (ms *MessageServer) Start() error {
defer func() {
log.Info("Message server is stopped")
logger.Info("Message server is stopped")
}()
//As we get one connection from the pool, don't try to close it.
@ -66,11 +66,11 @@ func (ms *MessageServer) Start() error {
m := &models.Message{}
if err := json.Unmarshal(res.Data, m); err != nil {
//logged
log.Warningf("read invalid message: %s\n", res.Data)
logger.Warningf("read invalid message: %s\n", res.Data)
}
if callback, ok := ms.callbacks[m.Event]; !ok {
//logged
log.Warningf("no handler to handle event %s\n", m.Event)
logger.Warningf("no handler to handle event %s\n", m.Event)
} else {
//Try to recover the concrete type
var converted interface{}
@ -95,17 +95,17 @@ func (ms *MessageServer) Start() error {
if e != nil {
err := e.(error)
//logged
log.Errorf("failed to fire callback with error: %s\n", err)
logger.Errorf("failed to fire callback with error: %s\n", err)
}
}
case redis.Subscription:
switch res.Kind {
case "subscribe":
log.Infof("Subscribe redis channel %s\n", res.Channel)
logger.Infof("Subscribe redis channel %s\n", res.Channel)
break
case "unsubscribe":
//Unsubscribe all, means main goroutine is exiting
log.Infof("Unsubscribe redis channel %s\n", res.Channel)
logger.Infof("Unsubscribe redis channel %s\n", res.Channel)
done <- nil
return
}
@ -113,7 +113,7 @@ func (ms *MessageServer) Start() error {
}
}()
log.Info("Message server is started")
logger.Info("Message server is started")
ticker := time.NewTicker(time.Minute)
defer ticker.Stop()

View File

@ -9,9 +9,9 @@ import (
"github.com/garyburd/redigo/redis"
"github.com/gocraft/work"
"github.com/vmware/harbor/src/common/utils/log"
"github.com/vmware/harbor/src/jobservice_v2/env"
"github.com/vmware/harbor/src/jobservice_v2/job"
"github.com/vmware/harbor/src/jobservice_v2/logger"
"github.com/vmware/harbor/src/jobservice_v2/models"
"github.com/vmware/harbor/src/jobservice_v2/opm"
"github.com/vmware/harbor/src/jobservice_v2/period"
@ -19,11 +19,7 @@ import (
)
var (
dialConnectionTimeout = 30 * time.Second
healthCheckPeriod = time.Minute
dialReadTimeout = healthCheckPeriod + 10*time.Second
dialWriteTimeout = 10 * time.Second
workerPoolDeadTime = 10 * time.Second
workerPoolDeadTime = 10 * time.Second
)
const (
@ -50,43 +46,21 @@ type GoCraftWorkPool struct {
knownJobs map[string]interface{}
}
//RedisPoolConfig defines configurations for GoCraftWorkPool.
type RedisPoolConfig struct {
RedisHost string
RedisPort uint
Namespace string
WorkerCount uint
}
//RedisPoolContext ...
//We do not use this context to pass any info so far; it is just a placeholder.
type RedisPoolContext struct{}
//NewGoCraftWorkPool is constructor of goCraftWorkPool.
func NewGoCraftWorkPool(ctx *env.Context, cfg RedisPoolConfig) *GoCraftWorkPool {
redisPool := &redis.Pool{
MaxActive: 6,
MaxIdle: 6,
Wait: true,
Dial: func() (redis.Conn, error) {
return redis.Dial(
"tcp",
fmt.Sprintf("%s:%d", cfg.RedisHost, cfg.RedisPort),
redis.DialConnectTimeout(dialConnectionTimeout),
redis.DialReadTimeout(dialReadTimeout),
redis.DialWriteTimeout(dialWriteTimeout),
)
},
}
pool := work.NewWorkerPool(RedisPoolContext{}, cfg.WorkerCount, cfg.Namespace, redisPool)
enqueuer := work.NewEnqueuer(cfg.Namespace, redisPool)
client := work.NewClient(cfg.Namespace, redisPool)
scheduler := period.NewRedisPeriodicScheduler(ctx, cfg.Namespace, redisPool)
sweeper := period.NewSweeper(cfg.Namespace, redisPool, client)
statsMgr := opm.NewRedisJobStatsManager(ctx.SystemContext, cfg.Namespace, redisPool, client, scheduler)
msgServer := NewMessageServer(ctx.SystemContext, cfg.Namespace, redisPool)
func NewGoCraftWorkPool(ctx *env.Context, namespace string, workerCount uint, redisPool *redis.Pool) *GoCraftWorkPool {
pool := work.NewWorkerPool(RedisPoolContext{}, workerCount, namespace, redisPool)
enqueuer := work.NewEnqueuer(namespace, redisPool)
client := work.NewClient(namespace, redisPool)
scheduler := period.NewRedisPeriodicScheduler(ctx, namespace, redisPool)
sweeper := period.NewSweeper(namespace, redisPool, client)
statsMgr := opm.NewRedisJobStatsManager(ctx.SystemContext, namespace, redisPool, client, scheduler)
msgServer := NewMessageServer(ctx.SystemContext, namespace, redisPool)
return &GoCraftWorkPool{
namespace: cfg.Namespace,
namespace: namespace,
redisPool: redisPool,
pool: pool,
enqueuer: enqueuer,
@ -170,20 +144,20 @@ func (gcwp *GoCraftWorkPool) Start() {
go func() {
defer func() {
gcwp.context.WG.Done()
log.Infof("Redis worker pool is stopped")
logger.Infof("Redis worker pool is stopped")
}()
//Clear dirty data before pool starting
if err := gcwp.sweeper.ClearOutdatedScheduledJobs(); err != nil {
//Only logged
log.Errorf("Clear outdated data before pool starting failed with error:%s\n", err)
logger.Errorf("Clear outdated data before pool starting failed with error:%s\n", err)
}
//Append middlewares
gcwp.pool.Middleware((*RedisPoolContext).logJob)
gcwp.pool.Start()
log.Infof("Redis worker pool is started")
logger.Infof("Redis worker pool is started")
//Block on listening context and done signal
select {
@ -467,7 +441,7 @@ func (gcwp *GoCraftWorkPool) handleRegisterStatusHook(data interface{}) error {
//log the job
func (rpc *RedisPoolContext) logJob(job *work.Job, next work.NextMiddlewareFunc) error {
log.Infof("Job incoming: %s:%s", job.Name, job.ID)
logger.Infof("Job incoming: %s:%s", job.Name, job.ID)
return next()
}
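
The constructor no longer builds the redis.Pool itself; the caller constructs the pool and passes the namespace, worker count, and pool in directly, as the bootstrap change below does:

// Sketch: the caller now owns the redis.Pool and hands it to the worker pool constructor.
redisWorkerPool := pool.NewGoCraftWorkPool(ctx,
	cfg.PoolConfig.RedisPoolCfg.Namespace,
	cfg.PoolConfig.WorkerCount,
	redisPool)
redisWorkerPool.Start()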

View File

@ -4,14 +4,15 @@ package runtime
import (
"context"
"fmt"
"os"
"os/signal"
"sync"
"syscall"
"time"
"github.com/garyburd/redigo/redis"
"github.com/vmware/harbor/src/common/job"
"github.com/vmware/harbor/src/common/utils/log"
"github.com/vmware/harbor/src/jobservice_v2/api"
"github.com/vmware/harbor/src/jobservice_v2/config"
"github.com/vmware/harbor/src/jobservice_v2/core"
@ -23,6 +24,13 @@ import (
"github.com/vmware/harbor/src/jobservice_v2/pool"
)
const (
dialConnectionTimeout = 30 * time.Second
healthCheckPeriod = time.Minute
dialReadTimeout = healthCheckPeriod + 10*time.Second
dialWriteTimeout = 10 * time.Second
)
//JobService ...
var JobService = &Bootstrap{}
@ -40,13 +48,7 @@ func (bs *Bootstrap) SetJobContextInitializer(initializer env.JobContextInitiali
//LoadAndRun will load configurations, initialize components and then start the related process to serve requests.
//Return error if meet any problems.
func (bs *Bootstrap) LoadAndRun(configFile string, detectEnv bool) {
//Load configurations
if err := config.DefaultConfig.Load(configFile, detectEnv); err != nil {
log.Errorf("Failed to load configurations with error: %s\n", err)
return
}
func (bs *Bootstrap) LoadAndRun() {
//Create the root context
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@ -62,7 +64,7 @@ func (bs *Bootstrap) LoadAndRun(configFile string, detectEnv bool) {
if jobCtx, err := bs.jobConextInitializer(rootContext); err == nil {
rootContext.JobContext = jobCtx
} else {
log.Fatalf("Failed to initialize job context: %s\n", err)
logger.Fatalf("Failed to initialize job context: %s\n", err)
}
}
@ -77,7 +79,7 @@ func (bs *Bootstrap) LoadAndRun(configFile string, detectEnv bool) {
//Start the API server
apiServer := bs.loadAndRunAPIServer(rootContext, config.DefaultConfig, ctl)
log.Infof("Server is started at %s:%d with %s", "", config.DefaultConfig.Port, config.DefaultConfig.Protocol)
logger.Infof("Server is started at %s:%d with %s", "", config.DefaultConfig.Port, config.DefaultConfig.Protocol)
//Start outdated log files sweeper
logSweeper := logger.NewSweeper(ctx, config.GetLogBasePath(), config.GetLogArchivePeriod())
@ -89,7 +91,7 @@ func (bs *Bootstrap) LoadAndRun(configFile string, detectEnv bool) {
select {
case <-sig:
case err := <-rootContext.ErrorChan:
log.Errorf("Server error:%s\n", err)
logger.Errorf("Server error:%s\n", err)
}
//Call cancel to send termination signal to other interested parts.
@ -117,7 +119,7 @@ func (bs *Bootstrap) LoadAndRun(configFile string, detectEnv bool) {
rootContext.WG.Wait()
close <- true
log.Infof("Server gracefully exit")
logger.Infof("Server gracefully exit")
}
//Load and run the API server.
@ -143,14 +145,25 @@ func (bs *Bootstrap) loadAndRunAPIServer(ctx *env.Context, cfg *config.Configura
//Load and run the worker pool
func (bs *Bootstrap) loadAndRunRedisWorkerPool(ctx *env.Context, cfg *config.Configuration) pool.Interface {
redisPoolCfg := pool.RedisPoolConfig{
RedisHost: cfg.PoolConfig.RedisPoolCfg.Host,
RedisPort: cfg.PoolConfig.RedisPoolCfg.Port,
Namespace: cfg.PoolConfig.RedisPoolCfg.Namespace,
WorkerCount: cfg.PoolConfig.WorkerCount,
redisPool := &redis.Pool{
MaxActive: 6,
MaxIdle: 6,
Wait: true,
Dial: func() (redis.Conn, error) {
return redis.Dial(
"tcp",
fmt.Sprintf("%s:%d", cfg.PoolConfig.RedisPoolCfg.Host, cfg.PoolConfig.RedisPoolCfg.Port),
redis.DialConnectTimeout(dialConnectionTimeout),
redis.DialReadTimeout(dialReadTimeout),
redis.DialWriteTimeout(dialWriteTimeout),
)
},
}
redisWorkerPool := pool.NewGoCraftWorkPool(ctx, redisPoolCfg)
redisWorkerPool := pool.NewGoCraftWorkPool(ctx,
cfg.PoolConfig.RedisPoolCfg.Namespace,
cfg.PoolConfig.WorkerCount,
redisPool)
//Register jobs here
if err := redisWorkerPool.RegisterJob(impl.KnownJobReplication, (*impl.ReplicationJob)(nil)); err != nil {
//exit
@ -159,9 +172,10 @@ func (bs *Bootstrap) loadAndRunRedisWorkerPool(ctx *env.Context, cfg *config.Con
}
if err := redisWorkerPool.RegisterJobs(
map[string]interface{}{
job.ImageScanJob: (*scan.ClairJob)(nil),
job.ImageReplicationTransfer: (*replication.Replicator)(nil),
job.ImageReplicationDelete: (*replication.Deleter)(nil),
job.ImageScanJob: (*scan.ClairJob)(nil),
job.ImageTransfer: (*replication.Transfer)(nil),
job.ImageDelete: (*replication.Deleter)(nil),
job.ImageReplicate: (*replication.Replicator)(nil),
}); err != nil {
//exit
ctx.ErrorChan <- err

View File

@ -41,7 +41,7 @@ func (r *ReplicationAPI) Prepare() {
return
}
if !r.SecurityCtx.IsSysAdmin() {
if !r.SecurityCtx.IsSysAdmin() && !r.SecurityCtx.IsSolutionUser() {
r.HandleForbidden(r.SecurityCtx.GetUsername())
return
}