Mirror of https://github.com/goharbor/harbor.git (synced 2024-11-23 10:45:45 +01:00)

Merge pull request #4551 from vmware/switch_job_service

Switch job service from old version to new version

Commit e4952b7a63

Makefile (4)

@@ -103,6 +103,7 @@ MARIADBVERSION=$(VERSIONTAG)
CLAIRVERSION=v2.0.1
CLAIRDBVERSION=$(VERSIONTAG)
MIGRATORVERSION=1.4
REDISVERSION=$(VERSIONTAG)

#clarity parameters
CLARITYIMAGE=vmware/harbor-clarity-ui-builder[:tag]

@@ -282,7 +283,7 @@ build:
    make -f $(MAKEFILEPATH_PHOTON)/Makefile build -e DEVFLAG=$(DEVFLAG) -e MARIADBVERSION=$(MARIADBVERSION) \
    -e REGISTRYVERSION=$(REGISTRYVERSION) -e NGINXVERSION=$(NGINXVERSION) -e NOTARYVERSION=$(NOTARYVERSION) \
    -e CLAIRVERSION=$(CLAIRVERSION) -e CLAIRDBVERSION=$(CLAIRDBVERSION) -e VERSIONTAG=$(VERSIONTAG) \
    -e BUILDBIN=$(BUILDBIN)
    -e BUILDBIN=$(BUILDBIN) -e REDISVERSION=$(REDISVERSION)

modify_composefile: modify_composefile_notary modify_composefile_clair
    @echo "preparing docker-compose file..."

@@ -294,6 +295,7 @@ modify_composefile: modify_composefile_notary modify_composefile_clair
    @$(SEDCMD) -i 's/__reg_version__/$(REGISTRYVERSION)-$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/ha/$(DOCKERCOMPOSEFILENAME)
    @$(SEDCMD) -i 's/__nginx_version__/$(NGINXVERSION)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
    @$(SEDCMD) -i 's/__nginx_version__/$(NGINXVERSION)/g' $(DOCKERCOMPOSEFILEPATH)/ha/$(DOCKERCOMPOSEFILENAME)
    @$(SEDCMD) -i 's/__redis_version__/$(REDISVERSION)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)

modify_composefile_notary:
    @echo "preparing docker-compose notary file..."

@@ -18,9 +18,9 @@ worker_pool:
  #Additional config if use 'redis' backend
  #TODO: switch to internal redis endpoint and namespace.
  redis_pool:
    host: "redis_host"
    host: "redis"
    port: 6379
    namespace: "namespace"
    namespace: "harbor_job_service_namespace"
#Logger for job
logger:
  path: "/var/log/jobs"

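The redis_pool settings above are what the rewritten job service reads into the PoolConfig and RedisPoolConfig types introduced further down in this diff. A minimal, self-contained sketch of that mapping, assuming gopkg.in/yaml.v2; the struct definitions here are local illustrative copies (only the yaml tags are taken from the diff), and the sample values for workers and backend are placeholders, not taken from the commit:

package main

import (
    "fmt"

    yaml "gopkg.in/yaml.v2"
)

// Local, trimmed copies of the worker pool config structs from this diff.
type redisPool struct {
    Host      string `yaml:"host"`
    Port      uint   `yaml:"port"`
    Namespace string `yaml:"namespace"`
}

type workerPool struct {
    Workers   uint       `yaml:"workers"`
    Backend   string     `yaml:"backend"`
    RedisPool *redisPool `yaml:"redis_pool,omitempty"`
}

func main() {
    // Inline document mirroring the redis values from the config.yml hunk above.
    doc := []byte(`
workers: 10
backend: "redis"
redis_pool:
  host: "redis"
  port: 6379
  namespace: "harbor_job_service_namespace"
`)

    pool := &workerPool{}
    if err := yaml.Unmarshal(doc, pool); err != nil {
        panic(err)
    }
    // Prints: redis:6379 (namespace harbor_job_service_namespace)
    fmt.Printf("%s:%d (namespace %s)\n", pool.RedisPool.Host, pool.RedisPool.Port, pool.RedisPool.Namespace)
}

The printed endpoint is exactly the redis service that the docker-compose hunk below adds to the harbor network.
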
@@ -103,6 +103,7 @@ services:
    networks:
      - harbor
    depends_on:
      - redis
      - ui
      - adminserver
    logging:

@@ -110,6 +111,14 @@ services:
      options:
        syslog-address: "tcp://127.0.0.1:1514"
        tag: "jobservice"
  redis:
    image: vmware/redis-photon:__redis_version__
    container_name: redis
    restart: always
    volumes:
      - /data/redis:/data
    networks:
      - harbor
  proxy:
    image: vmware/nginx-photon:__nginx_version__
    container_name: nginx

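Because the new redis service is attached to the same harbor network as the job service (which now also lists it under depends_on), the worker pool can reach it simply by its service name, which is why the config hunk above switches the host from "redis_host" to "redis". A tiny standard-library sketch of that reachability check, assuming it runs inside a container on the same compose network; the timeout value is an arbitrary choice for illustration:

package main

import (
    "fmt"
    "net"
    "time"
)

func main() {
    // "redis" resolves via Docker's embedded DNS because both services
    // are attached to the same "harbor" network in docker-compose.
    conn, err := net.DialTimeout("tcp", "redis:6379", 5*time.Second)
    if err != nil {
        fmt.Println("redis not reachable:", err)
        return
    }
    defer conn.Close()
    fmt.Println("redis reachable at", conn.RemoteAddr())
}
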
@@ -80,6 +80,10 @@ DOCKERIMAGENAME_NOTARYSIGNER=vmware/notary-signer-photon
DOCKERFILENAME_NOTARYSERVER=server.Dockerfile
DOCKERIMAGENAME_NOTARYSERVER=vmware/notary-server-photon

DOCKERFILEPATH_REDIS=$(DOCKERFILEPATH)/redis
DOCKERFILENAME_REDIS=Dockerfile
DOCKERIMAGENAME_REDIS=vmware/redis-photon

_build_db: _build_mariadb
    @echo "modify the db dockerfile..."
    @$(SEDCMD) -i 's/__version__/$(MARIADBVERSION)/g' $(DOCKERFILEPATH_DB)/$(DOCKERFILENAME_DB)

@@ -167,11 +171,16 @@ _build_mariadb:
    @cd $(DOCKERFILEPATH_MARIADB) && $(DOCKERBUILD) -f $(DOCKERFILEPATH_MARIADB)/$(DOCKERFILENAME_MARIADB) -t $(DOCKERIMAGENAME_MARIADB):$(MARIADBVERSION) .
    @echo "Done."

_build_redis:
    @echo "building redis container for photon..."
    @cd $(DOCKERFILEPATH_REDIS) && $(DOCKERBUILD) -f $(DOCKERFILEPATH_REDIS)/$(DOCKERFILENAME_REDIS) -t $(DOCKERIMAGENAME_REDIS):$(REDISVERSION) .
    @echo "Done."

define _get_binary
    $(WGET) --timeout 30 --no-check-certificate $1 -O $2
endef

build: _build_postgresql _build_db _build_adminiserver _build_ui _build_jobservice _build_log _build_nginx _build_registry _build_notary _build_clair
build: _build_postgresql _build_db _build_adminiserver _build_ui _build_jobservice _build_log _build_nginx _build_registry _build_notary _build_clair _build_redis

cleanimage:
    @echo "cleaning image for photon..."

@@ -1,43 +0,0 @@
// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package api

import (
    "github.com/vmware/harbor/src/common/api"
    "github.com/vmware/harbor/src/common/models"
    "github.com/vmware/harbor/src/common/utils/log"
    "github.com/vmware/harbor/src/jobservice/config"
    "net/http"
)

type jobBaseAPI struct {
    api.BaseAPI
}

func (j *jobBaseAPI) authenticate() {
    cookie, err := j.Ctx.Request.Cookie(models.UISecretCookie)
    if err != nil && err != http.ErrNoCookie {
        log.Errorf("failed to get cookie %s: %v", models.UISecretCookie, err)
        j.CustomAbort(http.StatusInternalServerError, "")
    }

    if err == http.ErrNoCookie {
        j.CustomAbort(http.StatusUnauthorized, "")
    }

    if cookie.Value != config.UISecret() {
        j.CustomAbort(http.StatusForbidden, "")
    }
}

@@ -8,13 +8,13 @@ import (
    "io/ioutil"
    "net/http"

    "github.com/vmware/harbor/src/jobservice_v2/opm"
    "github.com/vmware/harbor/src/jobservice/opm"

    "github.com/gorilla/mux"

    "github.com/vmware/harbor/src/jobservice_v2/core"
    "github.com/vmware/harbor/src/jobservice_v2/errs"
    "github.com/vmware/harbor/src/jobservice_v2/models"
    "github.com/vmware/harbor/src/jobservice/core"
    "github.com/vmware/harbor/src/jobservice/errs"
    "github.com/vmware/harbor/src/jobservice/models"
)

//Handler defines approaches to handle the http requests.

@@ -14,8 +14,8 @@ import (
    "testing"
    "time"

    "github.com/vmware/harbor/src/jobservice_v2/env"
    "github.com/vmware/harbor/src/jobservice_v2/models"
    "github.com/vmware/harbor/src/jobservice/env"
    "github.com/vmware/harbor/src/jobservice/models"
)

var testingHandler = NewDefaultHandler(&fakeController{})

@@ -1,21 +0,0 @@
// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package api

import (
    "testing"
)

func TestMain(m *testing.M) {
}

@@ -1,235 +0,0 @@
// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package api

import (
    "encoding/json"
    "fmt"
    "io/ioutil"
    "net/http"
    "strconv"

    "github.com/vmware/harbor/src/common/dao"
    "github.com/vmware/harbor/src/common/models"
    u "github.com/vmware/harbor/src/common/utils"
    "github.com/vmware/harbor/src/common/utils/log"
    "github.com/vmware/harbor/src/jobservice/config"
    "github.com/vmware/harbor/src/jobservice/job"
)

// ReplicationJob handles /api/replicationJobs /api/replicationJobs/:id/log
// /api/replicationJobs/actions
type ReplicationJob struct {
    jobBaseAPI
}

// ReplicationReq holds informations of request for /api/replicationJobs
type ReplicationReq struct {
    PolicyID int64 `json:"policy_id"`
    Repo string `json:"repository"`
    Operation string `json:"operation"`
    TagList []string `json:"tags"`
}

// Prepare ...
func (rj *ReplicationJob) Prepare() {
    rj.authenticate()
}

// Post creates replication jobs according to the policy.
func (rj *ReplicationJob) Post() {
    var data ReplicationReq
    rj.DecodeJSONReq(&data)
    log.Debugf("data: %+v", data)
    p, err := dao.GetRepPolicy(data.PolicyID)
    if err != nil {
        log.Errorf("Failed to get policy, error: %v", err)
        rj.RenderError(http.StatusInternalServerError, fmt.Sprintf("Failed to get policy, id: %d", data.PolicyID))
        return
    }
    if p == nil {
        log.Errorf("Policy not found, id: %d", data.PolicyID)
        rj.RenderError(http.StatusNotFound, fmt.Sprintf("Policy not found, id: %d", data.PolicyID))
        return
    }
    if len(data.Repo) == 0 { // sync all repositories
        repoList, err := getRepoList(p.ProjectID)
        if err != nil {
            log.Errorf("Failed to get repository list, project id: %d, error: %v", p.ProjectID, err)
            rj.RenderError(http.StatusInternalServerError, err.Error())
            return
        }
        log.Debugf("repo list: %v", repoList)
        for _, repo := range repoList {
            err := rj.addJob(repo, data.PolicyID, models.RepOpTransfer)
            if err != nil {
                log.Errorf("Failed to insert job record, error: %v", err)
                rj.RenderError(http.StatusInternalServerError, err.Error())
                return
            }
        }
    } else { // sync a single repository
        var op string
        if len(data.Operation) > 0 {
            op = data.Operation
        } else {
            op = models.RepOpTransfer
        }
        err := rj.addJob(data.Repo, data.PolicyID, op, data.TagList...)
        if err != nil {
            log.Errorf("Failed to insert job record, error: %v", err)
            rj.RenderError(http.StatusInternalServerError, err.Error())
            return
        }
    }
}

func (rj *ReplicationJob) addJob(repo string, policyID int64, operation string, tags ...string) error {
    j := models.RepJob{
        Repository: repo,
        PolicyID: policyID,
        Operation: operation,
        TagList: tags,
    }
    log.Debugf("Creating job for repo: %s, policy: %d", repo, policyID)
    id, err := dao.AddRepJob(j)
    if err != nil {
        return err
    }
    repJob := job.NewRepJob(id)

    log.Debugf("Send job to scheduler, job id: %d", id)
    job.Schedule(repJob)
    return nil
}

// RepActionReq holds informations of request for /api/replicationJobs/actions
type RepActionReq struct {
    PolicyID int64 `json:"policy_id"`
    Action string `json:"action"`
}

// HandleAction supports some operations to all the jobs of one policy
func (rj *ReplicationJob) HandleAction() {
    var data RepActionReq
    rj.DecodeJSONReq(&data)
    //Currently only support stop action
    if data.Action != "stop" {
        log.Errorf("Unrecognized action: %s", data.Action)
        rj.RenderError(http.StatusBadRequest, fmt.Sprintf("Unrecongized action: %s", data.Action))
        return
    }
    jobs, err := dao.GetRepJobToStop(data.PolicyID)
    if err != nil {
        log.Errorf("Failed to get jobs to stop, error: %v", err)
        rj.RenderError(http.StatusInternalServerError, "Faild to get jobs to stop")
        return
    }

    runningJobs := []*models.RepJob{}
    pendingAndRetryingJobs := []*models.RepJob{}
    for _, job := range jobs {
        if job.Status == models.JobRunning {
            runningJobs = append(runningJobs, job)
            continue
        }
        pendingAndRetryingJobs = append(pendingAndRetryingJobs, job)
    }

    // stop pending and retrying jobs by updating job status in database
    // when the jobs are dispatched, the status will be checked first
    for _, job := range pendingAndRetryingJobs {
        id := job.ID
        if err := dao.UpdateRepJobStatus(id, models.JobStopped); err != nil {
            log.Errorf("failed to update the status of job %d: %v", id, err)
            continue
        }
        log.Debugf("the status of job %d is updated to %s", id, models.JobStopped)
    }

    // stop running jobs in statemachine
    var repJobs []job.Job
    for _, j := range runningJobs {
        //transform the data record to job struct that can be handled by state machine.
        repJob := job.NewRepJob(j.ID)
        repJobs = append(repJobs, repJob)
    }
    job.WorkerPools[job.ReplicationType].StopJobs(repJobs)
}

// GetLog gets logs of the job
func (rj *ReplicationJob) GetLog() {
    idStr := rj.Ctx.Input.Param(":id")
    jid, err := strconv.ParseInt(idStr, 10, 64)
    if err != nil {
        log.Errorf("Error parsing job id: %s, error: %v", idStr, err)
        rj.RenderError(http.StatusBadRequest, "Invalid job id")
        return
    }
    repJob := job.NewRepJob(jid)
    logFile := repJob.LogPath()
    rj.Ctx.Output.Download(logFile)
}

// calls the api from UI to get repo list
func getRepoList(projectID int64) ([]string, error) {
    repositories := []string{}

    client := &http.Client{}
    uiURL := config.LocalUIURL()
    next := "/api/repositories?project_id=" + strconv.Itoa(int(projectID))
    for len(next) != 0 {
        req, err := http.NewRequest("GET", uiURL+next, nil)
        if err != nil {
            return repositories, err
        }
        req.AddCookie(&http.Cookie{Name: models.UISecretCookie, Value: config.JobserviceSecret()})
        resp, err := client.Do(req)
        if err != nil {
            return repositories, err
        }
        defer resp.Body.Close()

        if resp.StatusCode != http.StatusOK {
            b, err := ioutil.ReadAll(resp.Body)
            if err != nil {
                return repositories, err
            }
            return repositories,
                fmt.Errorf("failed to get repo list, response code: %d, error: %s",
                    resp.StatusCode, string(b))
        }

        body, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            return repositories, err
        }

        var list []*struct {
            Name string `json:"name"`
        }
        if err = json.Unmarshal(body, &list); err != nil {
            return repositories, err
        }
        for _, repo := range list {
            repositories = append(repositories, repo.Name)
        }

        links := u.ParseLink(resp.Header.Get(http.CanonicalHeaderKey("link")))
        next = links.Next()
    }

    return repositories, nil
}

@@ -1,90 +0,0 @@
// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package api

import (
    "net/http"
    "strconv"

    "github.com/vmware/harbor/src/common/dao"
    "github.com/vmware/harbor/src/common/models"
    "github.com/vmware/harbor/src/common/utils/log"
    "github.com/vmware/harbor/src/jobservice/job"
    "github.com/vmware/harbor/src/jobservice/utils"
)

// ImageScanJob handles /api/imageScanJobs /api/imageScanJobs/:id/log
type ImageScanJob struct {
    jobBaseAPI
}

// Prepare ...
func (isj *ImageScanJob) Prepare() {
    isj.authenticate()
}

// Post creates a scanner job and hand it to statemachine.
func (isj *ImageScanJob) Post() {
    var data models.ImageScanReq
    isj.DecodeJSONReq(&data)
    log.Debugf("data: %+v", data)
    repoClient, err := utils.NewRepositoryClientForJobservice(data.Repo)
    if err != nil {
        log.Errorf("An error occurred while creating repository client: %v", err)
        isj.RenderError(http.StatusInternalServerError, "Failed to repository client")
        return
    }
    digest, exist, err := repoClient.ManifestExist(data.Tag)
    if err != nil {
        log.Errorf("Failed to get manifest, error: %v", err)
        isj.RenderError(http.StatusInternalServerError, "Failed to get manifest")
        return
    }
    if !exist {
        log.Errorf("The repository based on request: %+v does not exist", data)
        isj.RenderError(http.StatusNotFound, "")
        return
    }
    //Insert job into DB
    j := models.ScanJob{
        Repository: data.Repo,
        Tag: data.Tag,
        Digest: digest,
    }
    jid, err := dao.AddScanJob(j)
    if err != nil {
        log.Errorf("Failed to add scan job to DB, error: %v", err)
        isj.RenderError(http.StatusInternalServerError, "Failed to insert scan job data.")
        return
    }
    log.Debugf("Scan job id: %d", jid)
    sj := job.NewScanJob(jid)
    log.Debugf("Sent job to scheduler, job: %v", sj)
    job.Schedule(sj)
}

// GetLog gets logs of the job
func (isj *ImageScanJob) GetLog() {
    idStr := isj.Ctx.Input.Param(":id")
    jid, err := strconv.ParseInt(idStr, 10, 64)
    if err != nil {
        log.Errorf("Error parsing job id: %s, error: %v", idStr, err)
        isj.RenderError(http.StatusBadRequest, "Invalid job id")
        return
    }
    scanJob := job.NewScanJob(jid)
    logFile := scanJob.LogPath()
    isj.Ctx.Output.Download(logFile)
}

@@ -9,9 +9,9 @@ import (
    "net/http"
    "time"

    "github.com/vmware/harbor/src/jobservice_v2/config"
    "github.com/vmware/harbor/src/jobservice_v2/env"
    "github.com/vmware/harbor/src/jobservice_v2/logger"
    "github.com/vmware/harbor/src/jobservice/config"
    "github.com/vmware/harbor/src/jobservice/env"
    "github.com/vmware/harbor/src/jobservice/logger"
)

//Server serves the http requests.

@@ -1,76 +0,0 @@
// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package client

import (
    "github.com/vmware/harbor/src/common/http"
    "github.com/vmware/harbor/src/common/http/modifier/auth"
)

// Replication holds information for submiting a replication job
type Replication struct {
    PolicyID int64 `json:"policy_id"`
    Repository string `json:"repository"`
    Operation string `json:"operation"`
    Tags []string `json:"tags"`
}

// Client defines the methods that a jobservice client should implement
type Client interface {
    SubmitReplicationJob(*Replication) error
    StopReplicationJobs(policyID int64) error
}

// DefaultClient provides a default implement for the interface Client
type DefaultClient struct {
    endpoint string
    client *http.Client
}

// Config contains configuration items needed for DefaultClient
type Config struct {
    Secret string
}

// NewDefaultClient returns an instance of DefaultClient
func NewDefaultClient(endpoint string, cfg *Config) *DefaultClient {
    c := &DefaultClient{
        endpoint: endpoint,
    }

    if cfg != nil {
        c.client = http.NewClient(nil, auth.NewSecretAuthorizer(cfg.Secret))
    }

    return c
}

// SubmitReplicationJob submits a replication job to the jobservice
func (d *DefaultClient) SubmitReplicationJob(replication *Replication) error {
    url := d.endpoint + "/api/jobs/replication"
    return d.client.Post(url, replication)
}

// StopReplicationJobs stop replication jobs of the policy specified by the policy ID
func (d *DefaultClient) StopReplicationJobs(policyID int64) error {
    url := d.endpoint + "/api/jobs/replication/actions"
    return d.client.Post(url, &struct {
        PolicyID int64 `json:"policy_id"`
        Action string `json:"action"`
    }{
        PolicyID: policyID,
        Action: "stop",
    })
}

@@ -1,86 +0,0 @@
// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package client

import (
    "encoding/json"
    "net/http"
    "os"
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/vmware/harbor/src/common/utils/test"
)

var url string

func TestMain(m *testing.M) {
    requestMapping := []*test.RequestHandlerMapping{
        &test.RequestHandlerMapping{
            Method: http.MethodPost,
            Pattern: "/api/jobs/replication/actions",
            Handler: func(w http.ResponseWriter, r *http.Request) {
                action := &struct {
                    PolicyID int64 `json:"policy_id"`
                    Action string `json:"action"`
                }{}
                if err := json.NewDecoder(r.Body).Decode(action); err != nil {
                    w.WriteHeader(http.StatusInternalServerError)
                    return
                }

                if action.PolicyID != 1 {
                    w.WriteHeader(http.StatusNotFound)
                    return
                }

            },
        },
        &test.RequestHandlerMapping{
            Method: http.MethodPost,
            Pattern: "/api/jobs/replication",
            Handler: func(w http.ResponseWriter, r *http.Request) {
                replication := &Replication{}
                if err := json.NewDecoder(r.Body).Decode(replication); err != nil {
                    w.WriteHeader(http.StatusInternalServerError)
                }
            },
        },
    }
    server := test.NewServer(requestMapping...)
    defer server.Close()

    url = server.URL

    os.Exit(m.Run())
}

func TestSubmitReplicationJob(t *testing.T) {
    client := NewDefaultClient(url, &Config{})
    err := client.SubmitReplicationJob(&Replication{})
    assert.Nil(t, err)
}

func TestStopReplicationJobs(t *testing.T) {
    client := NewDefaultClient(url, &Config{})

    // 404
    err := client.StopReplicationJobs(2)
    assert.NotNil(t, err)

    // 200
    err = client.StopReplicationJobs(1)
    assert.Nil(t, err)
}

@@ -1,180 +1,349 @@
// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Copyright 2018 The Harbor Authors. All rights reserved.

//Package config provides functions to handle the configurations of job service.
package config

import (
    "errors"
    "fmt"
    "os"
    "io/ioutil"
    "net/url"
    "strconv"
    "strings"

    "github.com/vmware/harbor/src/adminserver/client"
    "github.com/vmware/harbor/src/common"
    comcfg "github.com/vmware/harbor/src/common/config"
    "github.com/vmware/harbor/src/common/models"
    "github.com/vmware/harbor/src/common/utils/log"
    "github.com/vmware/harbor/src/jobservice/utils"
    yaml "gopkg.in/yaml.v2"
)

const (
    defaultKeyPath string = "/etc/jobservice/key"
    defaultLogDir string = "/var/log/jobs"
    secretCookieName string = "secret"
    jobServiceProtocol = "JOB_SERVICE_PROTOCOL"
    jobServicePort = "JOB_SERVICE_PORT"
    jobServiceHTTPCert = "JOB_SERVICE_HTTPS_CERT"
    jobServiceHTTPKey = "JOB_SERVICE_HTTPS_KEY"
    jobServiceWorkerPoolBackend = "JOB_SERVICE_POOL_BACKEND"
    jobServiceWorkers = "JOB_SERVICE_POOL_WORKERS"
    jobServiceRedisHost = "JOB_SERVICE_POOL_REDIS_HOST"
    jobServiceRedisPort = "JOB_SERVICE_POOL_REDIS_PORT"
    jobServiceRedisNamespace = "JOB_SERVICE_POOL_REDIS_NAMESPACE"
    jobServiceLoggerBasePath = "JOB_SERVICE_LOGGER_BASE_PATH"
    jobServiceLoggerLevel = "JOB_SERVICE_LOGGER_LEVEL"
    jobServiceLoggerArchivePeriod = "JOB_SERVICE_LOGGER_ARCHIVE_PERIOD"
    jobServiceAdminServerEndpoint = "ADMINSERVER_URL"
    jobServiceAuthSecret = "JOBSERVICE_SECRET"

    //JobServiceProtocolHTTPS points to the 'https' protocol
    JobServiceProtocolHTTPS = "https"
    //JobServiceProtocolHTTP points to the 'http' protocol
    JobServiceProtocolHTTP = "http"

    //JobServicePoolBackendRedis represents redis backend
    JobServicePoolBackendRedis = "redis"
)

var (
    // AdminserverClient is a client for adminserver
    AdminserverClient client.Client
    mg *comcfg.Manager
    keyProvider comcfg.KeyProvider
)
//DefaultConfig is the default configuration reference
var DefaultConfig = &Configuration{}

// Init configurations
func Init() error {
    //init key provider
    initKeyProvider()
//Configuration loads and keeps the related configuration items of job service.
type Configuration struct {
    //Protocol server listening on: https/http
    Protocol string `yaml:"protocol"`

    adminServerURL := os.Getenv("ADMINSERVER_URL")
    if len(adminServerURL) == 0 {
        adminServerURL = common.DefaultAdminserverEndpoint
    }
    log.Infof("initializing client for adminserver %s ...", adminServerURL)
    cfg := &client.Config{
        Secret: UISecret(),
    }
    AdminserverClient = client.NewClient(adminServerURL, cfg)
    if err := AdminserverClient.Ping(); err != nil {
        return fmt.Errorf("failed to ping adminserver: %v", err)
    }
    //Server listening port
    Port uint `yaml:"port"`

    mg = comcfg.NewManager(AdminserverClient, true)
    AdminServer string `yaml:"admin_server"`

    if _, err := mg.Load(); err != nil {
        return err
    }
    //Additional config when using https
    HTTPSConfig *HTTPSConfig `yaml:"https_config,omitempty"`

    return nil
    //Configurations of worker pool
    PoolConfig *PoolConfig `yaml:"worker_pool,omitempty"`

    //Logger configurations
    LoggerConfig *LoggerConfig `yaml:"logger,omitempty"`
}

func initKeyProvider() {
    path := os.Getenv("KEY_PATH")
    if len(path) == 0 {
        path = defaultKeyPath
    }
    log.Infof("key path: %s", path)

    keyProvider = comcfg.NewFileKeyProvider(path)
//HTTPSConfig keeps additional configurations when using https protocol
type HTTPSConfig struct {
    Cert string `yaml:"cert"`
    Key string `yaml:"key"`
}

// Database ...
func Database() (*models.Database, error) {
    cfg, err := mg.Get()
    if err != nil {
        return nil, err
    }
    database := &models.Database{}
    database.Type = cfg[common.DatabaseType].(string)
    mysql := &models.MySQL{}
    mysql.Host = cfg[common.MySQLHost].(string)
    mysql.Port = int(cfg[common.MySQLPort].(float64))
    mysql.Username = cfg[common.MySQLUsername].(string)
    mysql.Password = cfg[common.MySQLPassword].(string)
    mysql.Database = cfg[common.MySQLDatabase].(string)
    database.MySQL = mysql
    sqlite := &models.SQLite{}
    sqlite.File = cfg[common.SQLiteFile].(string)
    database.SQLite = sqlite

    return database, nil
//RedisPoolConfig keeps redis pool info.
type RedisPoolConfig struct {
    Host string `yaml:"host"`
    Port uint `yaml:"port"`
    Namespace string `yaml:"namespace"`
}

// MaxJobWorkers ...
func MaxJobWorkers() (int, error) {
    cfg, err := mg.Get()
    if err != nil {
        return 0, err
    }
    return int(cfg[common.MaxJobWorkers].(float64)), nil
//PoolConfig keeps worker pool configurations.
type PoolConfig struct {
    //0 means unlimited
    WorkerCount uint `yaml:"workers"`
    Backend string `yaml:"backend"`
    RedisPoolCfg *RedisPoolConfig `yaml:"redis_pool,omitempty"`
}

// LocalUIURL returns the local ui url, job service will use this URL to call API hosted on ui process
func LocalUIURL() string {
    cfg, err := mg.Get()
    if err != nil {
        log.Warningf("Failed to Get job service UI URL from backend, error: %v, will return default value.")
        return common.DefaultUIEndpoint
//LoggerConfig keeps logger configurations.
type LoggerConfig struct {
    BasePath string `yaml:"path"`
    LogLevel string `yaml:"level"`
    ArchivePeriod uint `yaml:"archive_period"`
}

//Load the configuration options from the specified yaml file.
//If the yaml file is specified and existing, load configurations from yaml file first;
//If detecting env variables is specified, load configurations from env variables;
//Please pay attentions, the detected env variable will override the same configuration item loading from file.
//
//yamlFilePath string: The path config yaml file
//readEnv bool : Whether detect the environment variables or not
func (c *Configuration) Load(yamlFilePath string, detectEnv bool) error {
    if !utils.IsEmptyStr(yamlFilePath) {
        //Try to load from file first
        data, err := ioutil.ReadFile(yamlFilePath)
        if err != nil {
            return err
        }
        if err = yaml.Unmarshal(data, c); err != nil {
            return err
        }
    }

    if detectEnv {
        //Load from env variables
        c.loadEnvs()
    }

    //Validate settings
    return c.validate()
}

//GetLogBasePath returns the log base path config
func GetLogBasePath() string {
    if DefaultConfig.LoggerConfig != nil {
        return DefaultConfig.LoggerConfig.BasePath
    }

    return ""
}

//GetLogLevel returns the log level
func GetLogLevel() string {
    if DefaultConfig.LoggerConfig != nil {
        return DefaultConfig.LoggerConfig.LogLevel
    }

    return ""
}

//GetLogArchivePeriod returns the archive period
func GetLogArchivePeriod() uint {
    if DefaultConfig.LoggerConfig != nil {
        return DefaultConfig.LoggerConfig.ArchivePeriod
    }

    return 1 //return default
}

//GetAuthSecret get the auth secret from the env
func GetAuthSecret() string {
    return utils.ReadEnv(jobServiceAuthSecret)
}

//GetAdminServerEndpoint return the admin server endpoint
func GetAdminServerEndpoint() string {
    return DefaultConfig.AdminServer
}

//Load env variables
func (c *Configuration) loadEnvs() {
    prot := utils.ReadEnv(jobServiceProtocol)
    if !utils.IsEmptyStr(prot) {
        c.Protocol = prot
    }

    p := utils.ReadEnv(jobServicePort)
    if !utils.IsEmptyStr(p) {
        if po, err := strconv.Atoi(p); err == nil {
            c.Port = uint(po)
        }
    }

    //Only when protocol is https
    if c.Protocol == JobServiceProtocolHTTPS {
        cert := utils.ReadEnv(jobServiceHTTPCert)
        if !utils.IsEmptyStr(cert) {
            if c.HTTPSConfig != nil {
                c.HTTPSConfig.Cert = cert
            } else {
                c.HTTPSConfig = &HTTPSConfig{
                    Cert: cert,
                }
            }
        }

        certKey := utils.ReadEnv(jobServiceHTTPKey)
        if !utils.IsEmptyStr(certKey) {
            if c.HTTPSConfig != nil {
                c.HTTPSConfig.Key = certKey
            } else {
                c.HTTPSConfig = &HTTPSConfig{
                    Key: certKey,
                }
            }
        }
    }

    backend := utils.ReadEnv(jobServiceWorkerPoolBackend)
    if !utils.IsEmptyStr(backend) {
        if c.PoolConfig == nil {
            c.PoolConfig = &PoolConfig{}
        }
        c.PoolConfig.Backend = backend
    }

    workers := utils.ReadEnv(jobServiceWorkers)
    if !utils.IsEmptyStr(workers) {
        if count, err := strconv.Atoi(workers); err == nil {
            if c.PoolConfig == nil {
                c.PoolConfig = &PoolConfig{}
            }
            c.PoolConfig.WorkerCount = uint(count)
        }
    }

    if c.PoolConfig != nil && c.PoolConfig.Backend == JobServicePoolBackendRedis {
        rh := utils.ReadEnv(jobServiceRedisHost)
        if !utils.IsEmptyStr(rh) {
            if c.PoolConfig.RedisPoolCfg == nil {
                c.PoolConfig.RedisPoolCfg = &RedisPoolConfig{}
            }
            c.PoolConfig.RedisPoolCfg.Host = rh
        }

        rp := utils.ReadEnv(jobServiceRedisPort)
        if !utils.IsEmptyStr(rp) {
            if rport, err := strconv.Atoi(rp); err == nil {
                if c.PoolConfig.RedisPoolCfg == nil {
                    c.PoolConfig.RedisPoolCfg = &RedisPoolConfig{}
                }
                c.PoolConfig.RedisPoolCfg.Port = uint(rport)
            }
        }

        rn := utils.ReadEnv(jobServiceRedisNamespace)
        if !utils.IsEmptyStr(rn) {
            if c.PoolConfig.RedisPoolCfg == nil {
                c.PoolConfig.RedisPoolCfg = &RedisPoolConfig{}
            }
            c.PoolConfig.RedisPoolCfg.Namespace = rn
        }
    }

    //logger
    loggerPath := utils.ReadEnv(jobServiceLoggerBasePath)
    if !utils.IsEmptyStr(loggerPath) {
        if c.LoggerConfig == nil {
            c.LoggerConfig = &LoggerConfig{}
        }
        c.LoggerConfig.BasePath = loggerPath
    }
    loggerLevel := utils.ReadEnv(jobServiceLoggerLevel)
    if !utils.IsEmptyStr(loggerLevel) {
        if c.LoggerConfig == nil {
            c.LoggerConfig = &LoggerConfig{}
        }
        c.LoggerConfig.LogLevel = loggerLevel
    }
    archivePeriod := utils.ReadEnv(jobServiceLoggerArchivePeriod)
    if !utils.IsEmptyStr(archivePeriod) {
        if period, err := strconv.Atoi(archivePeriod); err == nil {
            if c.LoggerConfig == nil {
                c.LoggerConfig = &LoggerConfig{}
            }
            c.LoggerConfig.ArchivePeriod = uint(period)
        }
    }

    //admin server
    if adminServer := utils.ReadEnv(jobServiceAdminServerEndpoint); !utils.IsEmptyStr(adminServer) {
        c.AdminServer = adminServer
    }
    return strings.TrimSuffix(cfg[common.UIURL].(string), "/")

}

// LocalRegURL returns the local registry url, job service will use this URL to pull image from the registry
func LocalRegURL() (string, error) {
    cfg, err := mg.Get()
    if err != nil {
        return "", err
//Check if the configurations are valid settings.
func (c *Configuration) validate() error {
    if c.Protocol != JobServiceProtocolHTTPS &&
        c.Protocol != JobServiceProtocolHTTP {
        return fmt.Errorf("protocol should be %s or %s, but current setting is %s",
            JobServiceProtocolHTTP,
            JobServiceProtocolHTTPS,
            c.Protocol)
    }
    return cfg[common.RegistryURL].(string), nil
}

// LogDir returns the absolute path to which the log file will be written
func LogDir() string {
    dir := os.Getenv("LOG_DIR")
    if len(dir) == 0 {
        dir = defaultLogDir
    if !utils.IsValidPort(c.Port) {
        return fmt.Errorf("port number should be a none zero integer and less or equal 65535, but current is %d", c.Port)
    }
    return dir
}

// SecretKey will return the secret key for encryption/decryption password in target.
func SecretKey() (string, error) {
    return keyProvider.Get(nil)
}
    if c.Protocol == JobServiceProtocolHTTPS {
        if c.HTTPSConfig == nil {
            return fmt.Errorf("certificate must be configured if serve with protocol %s", c.Protocol)
        }

// UISecret returns a secret to mark UI when communicate with other
// component
func UISecret() string {
    return os.Getenv("UI_SECRET")
}

// JobserviceSecret returns a secret to mark Jobservice when communicate with
// other component
func JobserviceSecret() string {
    return os.Getenv("JOBSERVICE_SECRET")
}

// ExtEndpoint ...
func ExtEndpoint() (string, error) {
    cfg, err := mg.Get()
    if err != nil {
        return "", err
        if utils.IsEmptyStr(c.HTTPSConfig.Cert) ||
            !utils.FileExists(c.HTTPSConfig.Cert) ||
            utils.IsEmptyStr(c.HTTPSConfig.Key) ||
            !utils.FileExists(c.HTTPSConfig.Key) {
            return fmt.Errorf("certificate for protocol %s is not correctly configured", c.Protocol)
        }
    }
    return cfg[common.ExtEndpoint].(string), nil
}

// InternalTokenServiceEndpoint ...
func InternalTokenServiceEndpoint() string {
    return LocalUIURL() + "/service/token"
}
    if c.PoolConfig == nil {
        return errors.New("no worker pool is configured")
    }

// ClairEndpoint returns the end point of clair instance, by default it's the one deployed within Harbor.
func ClairEndpoint() string {
    cfg, err :=mg.Get()
    if err != nil {
        return common.DefaultClairEndpoint
    if c.PoolConfig.Backend != JobServicePoolBackendRedis {
        return fmt.Errorf("worker pool backend %s does not support", c.PoolConfig.Backend)
    }
    if cfg[common.ClairURL] == nil {
        return common.DefaultClairEndpoint

    //When backend is redis
    if c.PoolConfig.Backend == JobServicePoolBackendRedis {
        if c.PoolConfig.RedisPoolCfg == nil {
            return fmt.Errorf("redis pool must be configured when backend is set to '%s'", c.PoolConfig.Backend)
        }
        if utils.IsEmptyStr(c.PoolConfig.RedisPoolCfg.Host) {
            return errors.New("host of redis pool is empty")
        }
        if !utils.IsValidPort(c.PoolConfig.RedisPoolCfg.Port) {
            return fmt.Errorf("redis port number should be a none zero integer and less or equal 65535, but current is %d", c.PoolConfig.RedisPoolCfg.Port)
        }
        if utils.IsEmptyStr(c.PoolConfig.RedisPoolCfg.Namespace) {
            return errors.New("namespace of redis pool is required")
        }
    }
    return cfg[common.ClairURL].(string)
}

    if c.LoggerConfig == nil {
        return errors.New("missing logger config")
    }

    if !utils.DirExists(c.LoggerConfig.BasePath) {
        return errors.New("logger path should be an existing dir")
    }

    validLevels := "DEBUG,INFO,WARNING,ERROR,FATAL"
    if !strings.Contains(validLevels, c.LoggerConfig.LogLevel) {
        return fmt.Errorf("logger level can only be one of: %s", validLevels)
    }

    if c.LoggerConfig.ArchivePeriod == 0 {
        return fmt.Errorf("logger archive period should be greater than 0")
    }

    if _, err := url.Parse(c.AdminServer); err != nil {
        return fmt.Errorf("invalid admin server endpoint: %s", err)
    }

    return nil //valid
}

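The new config package above replaces the adminserver-driven settings of the old job service with a yaml file plus optional env overrides. A minimal sketch of how a caller might bootstrap it, using only identifiers defined in this hunk (DefaultConfig, Load, GetAdminServerEndpoint, GetLogBasePath, GetLogLevel); the file path and error handling here are illustrative assumptions, not taken from the commit:

package main

import (
    "fmt"
    "os"

    "github.com/vmware/harbor/src/jobservice/config"
)

func main() {
    // Path is an assumption for this sketch; the real deployment wires its own location.
    configPath := "/etc/jobservice/config.yml"

    // Load parses the yaml file first, then (second argument true) lets the
    // JOB_SERVICE_* environment variables override individual items, and
    // finally runs the validate() checks on the merged result.
    if err := config.DefaultConfig.Load(configPath, true); err != nil {
        fmt.Fprintf(os.Stderr, "failed to load job service config: %v\n", err)
        os.Exit(1)
    }

    fmt.Println("admin server:", config.GetAdminServerEndpoint())
    fmt.Println("log base path:", config.GetLogBasePath())
    fmt.Println("log level:", config.GetLogLevel())
}
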
@@ -1,82 +1,142 @@
// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Copyright 2018 The Harbor Authors. All rights reserved.
package config

import (
    "os"
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/vmware/harbor/src/common/utils/test"
)

// test functions under package jobservice/config
func TestConfig(t *testing.T) {
    server, err := test.NewAdminserver(nil)
    if err != nil {
        t.Fatalf("failed to create a mock admin server: %v", err)
func TestConfigLoadingFailed(t *testing.T) {
    cfg := &Configuration{}
    if err := cfg.Load("./config.not-existing.yaml", false); err == nil {
        t.Fatalf("Load config from none-existing document, expect none nil error but got '%s'\n", err)
    }
    defer server.Close()

    if err := os.Setenv("ADMINSERVER_URL", server.URL); err != nil {
        t.Fatalf("failed to set env %s: %v", "ADMINSERVER_URL", err)
    }

    secretKeyPath := "/tmp/secretkey"
    _, err = test.GenerateKey(secretKeyPath)
    if err != nil {
        t.Errorf("failed to generate secret key: %v", err)
        return
    }
    defer os.Remove(secretKeyPath)
    assert := assert.New(t)

    if err := os.Setenv("KEY_PATH", secretKeyPath); err != nil {
        t.Fatalf("failed to set env %s: %v", "KEY_PATH", err)
    }

    if err := Init(); err != nil {
        t.Fatalf("failed to initialize configurations: %v", err)
    }

    if _, err := Database(); err != nil {
        t.Fatalf("failed to get database settings: %v", err)
    }

    if _, err := MaxJobWorkers(); err != nil {
        t.Fatalf("failed to get max job workers: %v", err)
    }

    if _, err := LocalRegURL(); err != nil {
        t.Fatalf("failed to get registry URL: %v", err)
    }

    if dir := LogDir(); dir != "/var/log/jobs" {
        t.Errorf("unexpected log directory: %s != %s", dir, "/var/log/jobs")
    }

    if _, err := SecretKey(); err != nil {
        t.Fatalf("failed to get secret key: %v", err)
    }

    if len(InternalTokenServiceEndpoint()) == 0 {
        t.Error("the internal token service endpoint is null")
    }

    if _, err := ExtEndpoint(); err != nil {
        t.Fatalf("failed to get ext endpoint: %v", err)
    }
    assert.Equal("http://myui:8888", LocalUIURL())
}

func TestConfigLoadingSucceed(t *testing.T) {
    if err := CreateLogDir(); err != nil {
        t.Fatal(err)
    }

    cfg := &Configuration{}
    if err := cfg.Load("../config_test.yml", false); err != nil {
        t.Fatalf("Load config from yaml file, expect nil error but got error '%s'\n", err)
    }

    if err := RemoveLogDir(); err != nil {
        t.Fatal(err)
    }
}

func TestConfigLoadingWithEnv(t *testing.T) {
    if err := CreateLogDir(); err != nil {
        t.Error(err)
    }
    setENV()

    cfg := &Configuration{}
    if err := cfg.Load("../config_test.yml", true); err != nil {
        t.Fatalf("Load config from yaml file, expect nil error but got error '%s'\n", err)
    }

    if cfg.Protocol != "https" {
        t.Fatalf("expect protocol 'https', but got '%s'\n", cfg.Protocol)
    }
    if cfg.Port != 8989 {
        t.Fatalf("expect port 8989 but got '%d'\n", cfg.Port)
    }
    if cfg.PoolConfig.WorkerCount != 8 {
        t.Fatalf("expect workcount 8 but go '%d'\n", cfg.PoolConfig.WorkerCount)
    }
    if cfg.PoolConfig.RedisPoolCfg.Host != "localhost" {
        t.Fatalf("expect redis host 'localhost' but got '%s'\n", cfg.PoolConfig.RedisPoolCfg.Host)
    }
    if cfg.PoolConfig.RedisPoolCfg.Port != 7379 {
        t.Fatalf("expect redis port '7379' but got '%d'\n", cfg.PoolConfig.RedisPoolCfg.Port)
    }
    if cfg.PoolConfig.RedisPoolCfg.Namespace != "ut_namespace" {
        t.Fatalf("expect redis namespace 'ut_namespace' but got '%s'\n", cfg.PoolConfig.RedisPoolCfg.Namespace)
    }
    if cfg.LoggerConfig.BasePath != "/tmp" {
        t.Fatalf("expect log base path '/tmp' but got '%s'\n", cfg.LoggerConfig.BasePath)
    }
    if cfg.LoggerConfig.LogLevel != "DEBUG" {
        t.Fatalf("expect log level 'DEBUG' but got '%s'\n", cfg.LoggerConfig.LogLevel)
    }
    if cfg.LoggerConfig.ArchivePeriod != 5 {
        t.Fatalf("expect log archive period 5 but got '%d'\n", cfg.LoggerConfig.ArchivePeriod)
    }

    unsetENV()
    if err := RemoveLogDir(); err != nil {
        t.Fatal(err)
    }
}

func TestDefaultConfig(t *testing.T) {
    if err := CreateLogDir(); err != nil {
        t.Fatal(err)
    }

    if err := DefaultConfig.Load("../config_test.yml", true); err != nil {
        t.Fatalf("Load config from yaml file, expect nil error but got error '%s'\n", err)
    }

    if endpoint := GetAdminServerEndpoint(); endpoint != "http://127.0.0.1:8888" {
        t.Fatalf("expect default admin server endpoint 'http://127.0.0.1:8888' but got '%s'\n", endpoint)
    }

    if basePath := GetLogBasePath(); basePath != "/tmp/job_logs" {
        t.Fatalf("expect default logger base path '/tmp/job_logs' but got '%s'\n", basePath)
    }

    if lvl := GetLogLevel(); lvl != "INFO" {
        t.Fatalf("expect default logger level 'INFO' but got '%s'\n", lvl)
    }

    if period := GetLogArchivePeriod(); period != 1 {
        t.Fatalf("expect default log archive period 1 but got '%d'\n", period)
    }

    if err := RemoveLogDir(); err != nil {
        t.Fatal(err)
    }
}

func setENV() {
    os.Setenv("JOB_SERVICE_PROTOCOL", "https")
    os.Setenv("JOB_SERVICE_PORT", "8989")
    os.Setenv("JOB_SERVICE_HTTPS_CERT", "../server.crt")
    os.Setenv("JOB_SERVICE_HTTPS_KEY", "../server.key")
    os.Setenv("JOB_SERVICE_POOL_BACKEND", "redis")
    os.Setenv("JOB_SERVICE_POOL_WORKERS", "8")
    os.Setenv("JOB_SERVICE_POOL_REDIS_HOST", "localhost")
    os.Setenv("JOB_SERVICE_POOL_REDIS_PORT", "7379")
    os.Setenv("JOB_SERVICE_POOL_REDIS_NAMESPACE", "ut_namespace")
    os.Setenv("JOB_SERVICE_LOGGER_BASE_PATH", "/tmp")
    os.Setenv("JOB_SERVICE_LOGGER_LEVEL", "DEBUG")
    os.Setenv("JOB_SERVICE_LOGGER_ARCHIVE_PERIOD", "5")
}

func unsetENV() {
    os.Unsetenv("JOB_SERVICE_PROTOCOL")
    os.Unsetenv("JOB_SERVICE_PORT")
    os.Unsetenv("JOB_SERVICE_HTTPS_CERT")
    os.Unsetenv("JOB_SERVICE_HTTPS_KEY")
    os.Unsetenv("JOB_SERVICE_POOL_BACKEND")
    os.Unsetenv("JOB_SERVICE_POOL_WORKERS")
    os.Unsetenv("JOB_SERVICE_POOL_REDIS_HOST")
    os.Unsetenv("JOB_SERVICE_POOL_REDIS_PORT")
    os.Unsetenv("JOB_SERVICE_POOL_REDIS_NAMESPACE")
    os.Unsetenv("JOB_SERVICE_LOGGER_BASE_PATH")
    os.Unsetenv("JOB_SERVICE_LOGGER_LEVEL")
    os.Unsetenv("JOB_SERVICE_LOGGER_ARCHIVE_PERIOD")
}

func CreateLogDir() error {
    return os.MkdirAll("/tmp/job_logs", 0755)
}

func RemoveLogDir() error {
    return os.Remove("/tmp/job_logs")
}

@@ -8,12 +8,12 @@ import (
    "io/ioutil"

    "github.com/robfig/cron"
    "github.com/vmware/harbor/src/jobservice_v2/config"
    "github.com/vmware/harbor/src/jobservice_v2/errs"
    "github.com/vmware/harbor/src/jobservice_v2/job"
    "github.com/vmware/harbor/src/jobservice_v2/models"
    "github.com/vmware/harbor/src/jobservice_v2/pool"
    "github.com/vmware/harbor/src/jobservice_v2/utils"
    "github.com/vmware/harbor/src/jobservice/config"
    "github.com/vmware/harbor/src/jobservice/errs"
    "github.com/vmware/harbor/src/jobservice/job"
    "github.com/vmware/harbor/src/jobservice/models"
    "github.com/vmware/harbor/src/jobservice/pool"
    "github.com/vmware/harbor/src/jobservice/utils"
)

const (

@@ -5,10 +5,10 @@ import (
    "errors"
    "testing"

    "github.com/vmware/harbor/src/jobservice_v2/errs"
    "github.com/vmware/harbor/src/jobservice/errs"

    "github.com/vmware/harbor/src/jobservice_v2/env"
    "github.com/vmware/harbor/src/jobservice_v2/models"
    "github.com/vmware/harbor/src/jobservice/env"
    "github.com/vmware/harbor/src/jobservice/models"
)

func TestLaunchGenericJob(t *testing.T) {

@@ -4,7 +4,7 @@
package core

import (
    "github.com/vmware/harbor/src/jobservice_v2/models"
    "github.com/vmware/harbor/src/jobservice/models"
)

//Interface defines the related main methods of job operation.

@@ -5,7 +5,7 @@ package env
import (
    "context"

    "github.com/vmware/harbor/src/jobservice_v2/logger"
    "github.com/vmware/harbor/src/jobservice/logger"
)

//JobContext is combination of BaseContext and other job specified resources.

@@ -12,11 +12,11 @@ import (
    "github.com/vmware/harbor/src/common"
    "github.com/vmware/harbor/src/common/dao"
    "github.com/vmware/harbor/src/common/models"
    "github.com/vmware/harbor/src/jobservice_v2/config"
    "github.com/vmware/harbor/src/jobservice_v2/env"
    "github.com/vmware/harbor/src/jobservice_v2/job"
    jlogger "github.com/vmware/harbor/src/jobservice_v2/job/impl/logger"
    "github.com/vmware/harbor/src/jobservice_v2/logger"
    "github.com/vmware/harbor/src/jobservice/config"
    "github.com/vmware/harbor/src/jobservice/env"
    "github.com/vmware/harbor/src/jobservice/job"
    jlogger "github.com/vmware/harbor/src/jobservice/job/impl/logger"
    "github.com/vmware/harbor/src/jobservice/logger"
)

//Context ...

@@ -10,11 +10,11 @@ import (

    "github.com/vmware/harbor/src/common/dao"
    "github.com/vmware/harbor/src/common/models"
    "github.com/vmware/harbor/src/jobservice_v2/opm"
    "github.com/vmware/harbor/src/jobservice/opm"

    "github.com/vmware/harbor/src/jobservice_v2/errs"
    "github.com/vmware/harbor/src/jobservice/errs"

    "github.com/vmware/harbor/src/jobservice_v2/env"
    "github.com/vmware/harbor/src/jobservice/env"
)

//DemoJob is the job to demostrate the job interface.

@@ -5,8 +5,8 @@ import (

    common_http "github.com/vmware/harbor/src/common/http"
    "github.com/vmware/harbor/src/common/utils/registry/auth"
    "github.com/vmware/harbor/src/jobservice_v2/env"
    "github.com/vmware/harbor/src/jobservice_v2/logger"
    "github.com/vmware/harbor/src/jobservice/env"
    "github.com/vmware/harbor/src/jobservice/logger"
)

// Deleter deletes repository or images on the destination registry

@@ -8,8 +8,8 @@ import (
    "github.com/vmware/harbor/src/common/models"
    reg "github.com/vmware/harbor/src/common/utils/registry"
    "github.com/vmware/harbor/src/common/utils/registry/auth"
    "github.com/vmware/harbor/src/jobservice_v2/env"
    "github.com/vmware/harbor/src/jobservice_v2/logger"
    "github.com/vmware/harbor/src/jobservice/env"
    "github.com/vmware/harbor/src/jobservice/logger"
)

// Replicator call UI's API to start a repliation according to the policy ID

@@ -17,9 +17,9 @@ import (
    "github.com/vmware/harbor/src/common/utils"
    reg "github.com/vmware/harbor/src/common/utils/registry"
    "github.com/vmware/harbor/src/common/utils/registry/auth"
    "github.com/vmware/harbor/src/jobservice_v2/env"
    job_utils "github.com/vmware/harbor/src/jobservice_v2/job/impl/utils"
    "github.com/vmware/harbor/src/jobservice_v2/logger"
    "github.com/vmware/harbor/src/jobservice/env"
    job_utils "github.com/vmware/harbor/src/jobservice/job/impl/utils"
    "github.com/vmware/harbor/src/jobservice/logger"
)

var (

@@ -26,8 +26,8 @@ import (
    "github.com/vmware/harbor/src/common/models"
    "github.com/vmware/harbor/src/common/utils/clair"
    "github.com/vmware/harbor/src/common/utils/log"
    "github.com/vmware/harbor/src/jobservice_v2/env"
    "github.com/vmware/harbor/src/jobservice_v2/job/impl/utils"
    "github.com/vmware/harbor/src/jobservice/env"
    "github.com/vmware/harbor/src/jobservice/job/impl/utils"
)

// ClairJob is the struct to scan Harbor's Image with Clair

@@ -2,7 +2,7 @@

package job

import "github.com/vmware/harbor/src/jobservice_v2/env"
import "github.com/vmware/harbor/src/jobservice/env"

//CheckOPCmdFunc is the function to check if the related operation commands
//like STOP or CANCEL is fired for the specified job. If yes, return the

@ -1,220 +0,0 @@
|
||||
// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
package job
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/vmware/harbor/src/common"
|
||||
"github.com/vmware/harbor/src/common/dao"
|
||||
"github.com/vmware/harbor/src/common/models"
|
||||
"github.com/vmware/harbor/src/common/utils/log"
|
||||
"github.com/vmware/harbor/src/common/utils/test"
|
||||
"github.com/vmware/harbor/src/jobservice/config"
|
||||
)
|
||||
|
||||
var repJobID, scanJobID int64
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
//Init config...
|
||||
conf := test.GetDefaultConfigMap()
|
||||
if len(os.Getenv("MYSQL_HOST")) > 0 {
|
||||
conf[common.MySQLHost] = os.Getenv("MYSQL_HOST")
|
||||
}
|
||||
if len(os.Getenv("MYSQL_PORT")) > 0 {
|
||||
p, err := strconv.Atoi(os.Getenv("MYSQL_PORT"))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
conf[common.MySQLPort] = p
|
||||
}
|
||||
if len(os.Getenv("MYSQL_USR")) > 0 {
|
||||
conf[common.MySQLUsername] = os.Getenv("MYSQL_USR")
|
||||
}
|
||||
if len(os.Getenv("MYSQL_PWD")) > 0 {
|
||||
conf[common.MySQLPassword] = os.Getenv("MYSQL_PWD")
|
||||
}
|
||||
|
||||
server, err := test.NewAdminserver(conf)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to create a mock admin server: %v", err)
|
||||
}
|
||||
defer server.Close()
|
||||
if err := os.Setenv("ADMINSERVER_URL", server.URL); err != nil {
|
||||
log.Fatalf("failed to set env %s: %v", "ADMINSERVER_URL", err)
|
||||
}
|
||||
secretKeyPath := "/tmp/secretkey"
|
||||
_, err = test.GenerateKey(secretKeyPath)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to generate secret key: %v", err)
|
||||
}
|
||||
defer os.Remove(secretKeyPath)
|
||||
if err := os.Setenv("KEY_PATH", secretKeyPath); err != nil {
|
||||
log.Fatalf("failed to set env %s: %v", "KEY_PATH", err)
|
||||
}
|
||||
if err := config.Init(); err != nil {
|
||||
log.Fatalf("failed to initialize configurations: %v", err)
|
||||
}
|
||||
dbSetting, err := config.Database()
|
||||
if err != nil {
|
||||
log.Fatalf("failed to get db configurations: %v", err)
|
||||
}
|
||||
if err := dao.InitDatabase(dbSetting); err != nil {
|
||||
log.Fatalf("failed to initialize database, error: %v", err)
|
||||
}
|
||||
//prepare data
|
||||
if err := prepareRepJobData(); err != nil {
|
||||
log.Fatalf("failed to initialize database, error: %v", err)
|
||||
}
|
||||
if err := prepareScanJobData(); err != nil {
|
||||
log.Fatalf("failed to initialize database, error: %v", err)
|
||||
}
|
||||
rc := m.Run()
|
||||
clearRepJobData()
|
||||
clearScanJobData()
|
||||
if rc != 0 {
|
||||
os.Exit(rc)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRepJob(t *testing.T) {
|
||||
rj := NewRepJob(repJobID)
|
||||
assert := assert.New(t)
|
||||
err := rj.Init()
|
||||
assert.Nil(err)
|
||||
assert.Equal(repJobID, rj.ID())
|
||||
assert.Equal(ReplicationType, rj.Type())
|
||||
p := fmt.Sprintf("/var/log/jobs/job_%d.log", repJobID)
|
||||
assert.Equal(p, rj.LogPath())
|
||||
err = rj.UpdateStatus(models.JobRetrying)
|
||||
assert.Nil(err)
|
||||
j, err := dao.GetRepJob(repJobID)
|
||||
assert.Equal(models.JobRetrying, j.Status)
|
||||
assert.False(rj.parm.Insecure)
|
||||
rj2 := NewRepJob(99999)
|
||||
err = rj2.Init()
|
||||
assert.NotNil(err)
|
||||
}
|
||||
|
||||
func TestScanJob(t *testing.T) {
|
||||
sj := NewScanJob(scanJobID)
|
||||
assert := assert.New(t)
|
||||
err := sj.Init()
|
||||
assert.Nil(err)
|
||||
assert.Equal(scanJobID, sj.ID())
|
||||
assert.Equal(ScanType, sj.Type())
|
||||
p := fmt.Sprintf("/var/log/jobs/scan_job/job_%d.log", scanJobID)
|
||||
assert.Equal(p, sj.LogPath())
|
||||
err = sj.UpdateStatus(models.JobRetrying)
|
||||
assert.Nil(err)
|
||||
j, err := dao.GetScanJob(scanJobID)
|
||||
assert.Equal(models.JobRetrying, j.Status)
|
||||
assert.Equal("sha256:0204dc6e09fa57ab99ac40e415eb637d62c8b2571ecbbc9ca0eb5e2ad2b5c56f", sj.parm.Digest)
|
||||
sj2 := NewScanJob(99999)
|
||||
err = sj2.Init()
|
||||
assert.NotNil(err)
|
||||
}
|
||||
|
||||
func TestStatusUpdater(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
rj := NewRepJob(repJobID)
|
||||
su := &StatusUpdater{rj, models.JobFinished}
|
||||
su.Enter()
|
||||
su.Exit()
|
||||
j, err := dao.GetRepJob(repJobID)
|
||||
assert.Nil(err)
|
||||
assert.Equal(models.JobFinished, j.Status)
|
||||
}
|
||||
|
||||
func prepareRepJobData() error {
|
||||
if err := clearRepJobData(); err != nil {
|
||||
return err
|
||||
}
|
||||
regURL, err := config.LocalRegURL()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
target := models.RepTarget{
|
||||
Name: "name",
|
||||
URL: regURL,
|
||||
Username: "username",
|
||||
Password: "password",
|
||||
}
|
||||
|
||||
targetID, err := dao.AddRepTarget(target)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
policy := models.RepPolicy{
|
||||
ProjectID: 1,
|
||||
TargetID: targetID,
|
||||
Description: "whatever",
|
||||
Name: "mypolicy",
|
||||
}
|
||||
policyID, err := dao.AddRepPolicy(policy)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
job := models.RepJob{
|
||||
Repository: "library/ubuntu",
|
||||
PolicyID: policyID,
|
||||
Operation: "transfer",
|
||||
TagList: []string{"12.01", "14.04", "latest"},
|
||||
}
|
||||
id, err := dao.AddRepJob(job)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
repJobID = id
|
||||
return nil
|
||||
}
|
||||
|
||||
func clearRepJobData() error {
|
||||
if err := dao.ClearTable(models.RepJobTable); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := dao.ClearTable(models.RepPolicyTable); err != nil {
|
||||
return err
|
||||
}
|
||||
return dao.ClearTable(models.RepTargetTable)
|
||||
}
|
||||
|
||||
func prepareScanJobData() error {
|
||||
if err := clearScanJobData(); err != nil {
|
||||
return err
|
||||
}
|
||||
sj := models.ScanJob{
|
||||
Status: models.JobPending,
|
||||
Repository: "library/ubuntu",
|
||||
Tag: "15.10",
|
||||
Digest: "sha256:0204dc6e09fa57ab99ac40e415eb637d62c8b2571ecbbc9ca0eb5e2ad2b5c56f",
|
||||
}
|
||||
id, err := dao.AddScanJob(sj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
scanJobID = id
|
||||
return nil
|
||||
}
|
||||
|
||||
func clearScanJobData() error {
|
||||
if err := dao.ClearTable(models.ScanJobTable); err != nil {
|
||||
return err
|
||||
}
|
||||
return dao.ClearTable(models.ScanOverviewTable)
|
||||
}
|
@ -1,248 +0,0 @@
|
||||
// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package job
|
||||
|
||||
import (
|
||||
"github.com/vmware/harbor/src/common/dao"
|
||||
uti "github.com/vmware/harbor/src/common/utils"
|
||||
"github.com/vmware/harbor/src/jobservice/config"
|
||||
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// Type is for job Type
|
||||
type Type int
|
||||
|
||||
const (
|
||||
// ReplicationType is the Type to identify a replication job.
|
||||
ReplicationType Type = iota
|
||||
// ScanType is the Type to identify an image scanning job.
|
||||
ScanType
|
||||
)
|
||||
|
||||
func (t Type) String() string {
|
||||
if ReplicationType == t {
|
||||
return "Replication"
|
||||
} else if ScanType == t {
|
||||
return "Scan"
|
||||
} else {
|
||||
return "Unknown"
|
||||
}
|
||||
}
|
||||
|
||||
//Job is abstraction for image replication and image scan jobs.
|
||||
type Job interface {
|
||||
//ID returns the id of the job
|
||||
ID() int64
|
||||
Type() Type
|
||||
LogPath() string
|
||||
UpdateStatus(status string) error
|
||||
GetStatus() (string, error)
|
||||
Init() error
|
||||
//Parm() interface{}
|
||||
}
|
||||
|
||||
// RepJobParm wraps the parm of a replication job
|
||||
type RepJobParm struct {
|
||||
LocalRegURL string
|
||||
TargetURL string
|
||||
TargetUsername string
|
||||
TargetPassword string
|
||||
Repository string
|
||||
Tags []string
|
||||
Operation string
|
||||
Insecure bool
|
||||
}
|
||||
|
||||
// RepJob implements Job interface, represents a replication job.
|
||||
type RepJob struct {
|
||||
id int64
|
||||
parm *RepJobParm
|
||||
}
|
||||
|
||||
// ID returns the ID of the replication job
|
||||
func (rj *RepJob) ID() int64 {
|
||||
return rj.id
|
||||
}
|
||||
|
||||
// Type returns the type of the replication job, it should always be ReplicationType
|
||||
func (rj *RepJob) Type() Type {
|
||||
return ReplicationType
|
||||
}
|
||||
|
||||
// LogPath returns the absolute path of the particular replication job.
|
||||
func (rj *RepJob) LogPath() string {
|
||||
return GetJobLogPath(config.LogDir(), rj.id)
|
||||
}
|
||||
|
||||
// UpdateStatus ...
|
||||
func (rj *RepJob) UpdateStatus(status string) error {
|
||||
return dao.UpdateRepJobStatus(rj.id, status)
|
||||
}
|
||||
|
||||
// String ...
|
||||
func (rj *RepJob) String() string {
|
||||
return fmt.Sprintf("{JobID: %d, JobType: %v}", rj.ID(), rj.Type())
|
||||
}
|
||||
|
||||
// Init prepares parm for the replication job
|
||||
func (rj *RepJob) Init() error {
|
||||
//init parms
|
||||
job, err := dao.GetRepJob(rj.id)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to get job, error: %v", err)
|
||||
}
|
||||
if job == nil {
|
||||
return fmt.Errorf("The job doesn't exist in DB, job id: %d", rj.id)
|
||||
}
|
||||
policy, err := dao.GetRepPolicy(job.PolicyID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to get policy, error: %v", err)
|
||||
}
|
||||
if policy == nil {
|
||||
return fmt.Errorf("The policy doesn't exist in DB, policy id:%d", job.PolicyID)
|
||||
}
|
||||
|
||||
regURL, err := config.LocalRegURL()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rj.parm = &RepJobParm{
|
||||
LocalRegURL: regURL,
|
||||
Repository: job.Repository,
|
||||
Tags: job.TagList,
|
||||
Operation: job.Operation,
|
||||
}
|
||||
target, err := dao.GetRepTarget(policy.TargetID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to get target, error: %v", err)
|
||||
}
|
||||
if target == nil {
|
||||
return fmt.Errorf("The target doesn't exist in DB, target id: %d", policy.TargetID)
|
||||
}
|
||||
rj.parm.TargetURL = target.URL
|
||||
rj.parm.TargetUsername = target.Username
|
||||
pwd := target.Password
|
||||
|
||||
if len(pwd) != 0 {
|
||||
key, err := config.SecretKey()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pwd, err = uti.ReversibleDecrypt(pwd, key)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to decrypt password: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
rj.parm.TargetPassword = pwd
|
||||
rj.parm.Insecure = target.Insecure
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetStatus returns the status of the job
|
||||
func (rj *RepJob) GetStatus() (string, error) {
|
||||
job, err := dao.GetRepJob(rj.id)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if job == nil {
|
||||
return "", fmt.Errorf("replication job %v not found", rj.id)
|
||||
}
|
||||
return job.Status, nil
|
||||
}
|
||||
|
||||
// NewRepJob returns a pointer to RepJob which implements the Job interface.
|
||||
// Given the API only gets the id, it will call this func to get an instance that can be maneuvered by the state machine.
|
||||
func NewRepJob(id int64) *RepJob {
|
||||
return &RepJob{id: id}
|
||||
}
|
||||
|
||||
//ScanJob implements the Job interface, representing a job for scanning image.
|
||||
type ScanJob struct {
|
||||
id int64
|
||||
parm *ScanJobParm
|
||||
}
|
||||
|
||||
//ScanJobParm wraps the parms of an image scan job.
|
||||
type ScanJobParm struct {
|
||||
Repository string
|
||||
Tag string
|
||||
Digest string
|
||||
}
|
||||
|
||||
//ID returns the id of the scan
|
||||
func (sj *ScanJob) ID() int64 {
|
||||
return sj.id
|
||||
}
|
||||
|
||||
//Type always return ScanType
|
||||
func (sj *ScanJob) Type() Type {
|
||||
return ScanType
|
||||
}
|
||||
|
||||
//LogPath returns the absolute path of the log file for the job, log files for scan job will be put in a sub folder of base log path.
|
||||
func (sj *ScanJob) LogPath() string {
|
||||
return GetJobLogPath(filepath.Join(config.LogDir(), "scan_job"), sj.id)
|
||||
}
|
||||
|
||||
//String ...
|
||||
func (sj *ScanJob) String() string {
|
||||
return fmt.Sprintf("{JobID: %d, JobType: %v}", sj.ID(), sj.Type())
|
||||
}
|
||||
|
||||
//UpdateStatus ...
|
||||
func (sj *ScanJob) UpdateStatus(status string) error {
|
||||
return dao.UpdateScanJobStatus(sj.id, status)
|
||||
}
|
||||
|
||||
//Init query the DB and populate the information of the image to scan in the parm of this job.
|
||||
func (sj *ScanJob) Init() error {
|
||||
job, err := dao.GetScanJob(sj.id)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to get job, error: %v", err)
|
||||
}
|
||||
if job == nil {
|
||||
return fmt.Errorf("The job doesn't exist in DB, job id: %d", sj.id)
|
||||
}
|
||||
sj.parm = &ScanJobParm{
|
||||
Repository: job.Repository,
|
||||
Tag: job.Tag,
|
||||
Digest: job.Digest,
|
||||
}
|
||||
err = dao.SetScanJobForImg(job.Digest, sj.id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetStatus returns the status of the job
|
||||
func (sj *ScanJob) GetStatus() (string, error) {
|
||||
job, err := dao.GetScanJob(sj.id)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if job == nil {
|
||||
return "", fmt.Errorf("scan job %d not found", sj.id)
|
||||
}
|
||||
return job.Status, nil
|
||||
}
|
||||
|
||||
//NewScanJob creates an instance of ScanJob by id.
|
||||
func NewScanJob(id int64) *ScanJob {
|
||||
return &ScanJob{id: id}
|
||||
}
|
@ -1,64 +0,0 @@
|
||||
// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package job
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
|
||||
"github.com/vmware/harbor/src/common/utils/log"
|
||||
)
|
||||
|
||||
// NewLogger creates a logger for a specified job
|
||||
func NewLogger(j Job) (*log.Logger, error) {
|
||||
logFile := j.LogPath()
|
||||
d := filepath.Dir(logFile)
|
||||
if _, err := os.Stat(d); os.IsNotExist(err) {
|
||||
err := os.MkdirAll(d, 0700)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to create directory for log file %s, the error: %v", logFile, err)
|
||||
}
|
||||
}
|
||||
f, err := os.OpenFile(logFile, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0600)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to open log file %s, the log of job %v will be printed to standard output, the error: %v", logFile, j, err)
|
||||
f = os.Stdout
|
||||
}
|
||||
return log.New(f, log.NewTextFormatter(), log.InfoLevel), nil
|
||||
}
|
||||
|
||||
// GetJobLogPath returns the absolute path in which the job log file is located.
|
||||
func GetJobLogPath(base string, jobID int64) string {
|
||||
f := fmt.Sprintf("job_%d.log", jobID)
|
||||
k := jobID / 1000
|
||||
p := ""
|
||||
var d string
|
||||
for k > 0 {
|
||||
d = strconv.FormatInt(k%1000, 10)
|
||||
k = k / 1000
|
||||
if k > 0 && len(d) == 1 {
|
||||
d = "00" + d
|
||||
}
|
||||
if k > 0 && len(d) == 2 {
|
||||
d = "0" + d
|
||||
}
|
||||
|
||||
p = filepath.Join(d, p)
|
||||
}
|
||||
p = filepath.Join(base, p, f)
|
||||
return p
|
||||
}
|
@ -1,36 +0,0 @@
|
||||
// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package job
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/vmware/harbor/src/common/utils/log"
|
||||
)
|
||||
|
||||
var jobQueue = make(chan Job)
|
||||
|
||||
// Schedule put a job id into job queue.
|
||||
func Schedule(j Job) {
|
||||
jobQueue <- j
|
||||
}
|
||||
|
||||
// Reschedule is called by statemachine to retry a job
|
||||
func Reschedule(j Job) {
|
||||
log.Debugf("Job %v will be rescheduled in 5 minutes", j)
|
||||
time.Sleep(5 * time.Minute)
|
||||
log.Debugf("Rescheduling job %v", j)
|
||||
Schedule(j)
|
||||
}
|
@ -1,118 +0,0 @@
|
||||
// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package job
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/vmware/harbor/src/common/models"
|
||||
"github.com/vmware/harbor/src/common/utils/log"
|
||||
)
|
||||
|
||||
// StateHandler handles transitions; it is associated with each state and will be called when
|
||||
// SM enters and exits a state during a transition.
|
||||
type StateHandler interface {
|
||||
// Enter returns the next state, if it returns empty string the SM will hold the current state or
|
||||
// decide the next state.
|
||||
Enter() (string, error)
|
||||
//Exit should be idempotent
|
||||
Exit() error
|
||||
}
|
||||
|
||||
// StatusUpdater implements the StateHandler interface which updates the status of a job in DB when the job enters
|
||||
// a status.
|
||||
type StatusUpdater struct {
|
||||
Job Job
|
||||
Status string
|
||||
}
|
||||
|
||||
// Enter updates the status of a job and returns "_continue" status to tell state machine to move on.
|
||||
// If the status is a final status it returns empty string and the state machine will be stopped.
|
||||
func (su StatusUpdater) Enter() (string, error) {
|
||||
//err := dao.UpdateRepJobStatus(su.JobID, su.State)
|
||||
err := su.Job.UpdateStatus(su.Status)
|
||||
if err != nil {
|
||||
log.Warningf("Failed to update state of job: %v, status: %s, error: %v", su.Job, su.Status, err)
|
||||
}
|
||||
var next = models.JobContinue
|
||||
if su.Status == models.JobStopped || su.Status == models.JobError || su.Status == models.JobFinished {
|
||||
next = ""
|
||||
}
|
||||
return next, err
|
||||
}
|
||||
|
||||
// Exit ...
|
||||
func (su StatusUpdater) Exit() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Retry handles a special "retrying" in which case it will update the status in DB and reschedule the job
|
||||
// via scheduler
|
||||
type Retry struct {
|
||||
Job Job
|
||||
}
|
||||
|
||||
// Enter ...
|
||||
func (jr Retry) Enter() (string, error) {
|
||||
err := jr.Job.UpdateStatus(models.JobRetrying)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to update state of job: %v to Retrying, error: %v", jr.Job, err)
|
||||
}
|
||||
go Reschedule(jr.Job)
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Exit ...
|
||||
func (jr Retry) Exit() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ImgPuller was for testing
|
||||
type ImgPuller struct {
|
||||
img string
|
||||
logger *log.Logger
|
||||
}
|
||||
|
||||
// Enter ...
|
||||
func (ip ImgPuller) Enter() (string, error) {
|
||||
ip.logger.Infof("I'm pretending to pull img:%s, then sleep 30s", ip.img)
|
||||
time.Sleep(30 * time.Second)
|
||||
ip.logger.Infof("wake up from sleep.... testing retry")
|
||||
return models.JobRetrying, nil
|
||||
}
|
||||
|
||||
// Exit ...
|
||||
func (ip ImgPuller) Exit() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ImgPusher is a statehandler for testing
|
||||
type ImgPusher struct {
|
||||
targetURL string
|
||||
logger *log.Logger
|
||||
}
|
||||
|
||||
// Enter ...
|
||||
func (ip ImgPusher) Enter() (string, error) {
|
||||
ip.logger.Infof("I'm pretending to push img to:%s, then sleep 30s", ip.targetURL)
|
||||
time.Sleep(30 * time.Second)
|
||||
ip.logger.Infof("wake up from sleep.... testing retry")
|
||||
return models.JobRetrying, nil
|
||||
}
|
||||
|
||||
// Exit ...
|
||||
func (ip ImgPusher) Exit() error {
|
||||
return nil
|
||||
}
|
@ -1,291 +0,0 @@
|
||||
// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package job
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime/debug"
|
||||
"sync"
|
||||
|
||||
"github.com/vmware/harbor/src/common/models"
|
||||
"github.com/vmware/harbor/src/common/utils/log"
|
||||
"github.com/vmware/harbor/src/jobservice/config"
|
||||
"github.com/vmware/harbor/src/jobservice/replication"
|
||||
"github.com/vmware/harbor/src/jobservice/scan"
|
||||
)
|
||||
|
||||
// SM is the state machine to handle jobs; it handles one job at a time.
|
||||
type SM struct {
|
||||
CurrentJob Job
|
||||
CurrentState string
|
||||
PreviousState string
|
||||
//The states that don't have to exist in transition map, such as "Error", "Canceled"
|
||||
ForcedStates map[string]struct{}
|
||||
Transitions map[string]map[string]struct{}
|
||||
Handlers map[string]StateHandler
|
||||
desiredState string
|
||||
Logger *log.Logger
|
||||
lock *sync.Mutex
|
||||
}
|
||||
|
||||
// EnterState transits the statemachine from the current state to the state in the parameter.
|
||||
// It returns the next state the statemachine should transit to.
|
||||
func (sm *SM) EnterState(s string) (string, error) {
|
||||
log.Debugf("Job: %v, transiting from State: %s, to State: %s", sm.CurrentJob, sm.CurrentState, s)
|
||||
targets, ok := sm.Transitions[sm.CurrentState]
|
||||
_, exist := targets[s]
|
||||
_, isForced := sm.ForcedStates[s]
|
||||
if !exist && !isForced {
|
||||
return "", fmt.Errorf("job: %v, transition from %s to %s does not exist", sm.CurrentJob, sm.CurrentState, s)
|
||||
}
|
||||
exitHandler, ok := sm.Handlers[sm.CurrentState]
|
||||
if ok {
|
||||
if err := exitHandler.Exit(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
} else {
|
||||
log.Debugf("Job: %v, no exit handler found for state:%s, skip", sm.CurrentJob, sm.CurrentState)
|
||||
}
|
||||
enterHandler, ok := sm.Handlers[s]
|
||||
var next = models.JobContinue
|
||||
var err error
|
||||
if ok {
|
||||
if next, err = enterHandler.Enter(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
} else {
|
||||
log.Debugf("Job: %v, no handler found for state:%s, skip", sm.CurrentJob, s)
|
||||
}
|
||||
sm.PreviousState = sm.CurrentState
|
||||
sm.CurrentState = s
|
||||
log.Debugf("Job: %v, transition succeeded, current state: %s", sm.CurrentJob, s)
|
||||
return next, nil
|
||||
}
|
||||
|
||||
// Start kicks off the statemachine to transit from current state to s, and moves on
|
||||
// It will search the transit map if the next state is "_continue", and
|
||||
// will enter error state if there's more than one possible path when next state is "_continue"
|
||||
func (sm *SM) Start(s string) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
sm.Logger.Errorf("Panic: %v, entering error state", r)
|
||||
log.Warningf("Panic when handling job: %v, panic: %v \n %s \n, entering error state", sm.CurrentJob, r, debug.Stack())
|
||||
sm.EnterState(models.JobError)
|
||||
}
|
||||
}()
|
||||
n, err := sm.EnterState(s)
|
||||
log.Debugf("Job: %v, next state from handler: %s", sm.CurrentJob, n)
|
||||
for len(n) > 0 && err == nil {
|
||||
if d := sm.getDesiredState(); len(d) > 0 {
|
||||
log.Debugf("Job: %v, Desired state: %s, will ignore the next state from handler", sm.CurrentJob, d)
|
||||
n = d
|
||||
sm.setDesiredState("")
|
||||
continue
|
||||
}
|
||||
if n == models.JobContinue && len(sm.Transitions[sm.CurrentState]) == 1 {
|
||||
for n = range sm.Transitions[sm.CurrentState] {
|
||||
break
|
||||
}
|
||||
log.Debugf("Job: %v, Continue to state: %s", sm.CurrentJob, n)
|
||||
continue
|
||||
}
|
||||
if n == models.JobContinue && len(sm.Transitions[sm.CurrentState]) != 1 {
|
||||
log.Errorf("Job: %v, next state is continue but there are %d possible next states in transition table", sm.CurrentJob, len(sm.Transitions[sm.CurrentState]))
|
||||
err = fmt.Errorf("Unable to continue")
|
||||
break
|
||||
}
|
||||
n, err = sm.EnterState(n)
|
||||
log.Debugf("Job: %v, next state from handler: %s", sm.CurrentJob, n)
|
||||
}
|
||||
if err != nil {
|
||||
log.Warningf("Job: %v, the statemachine will enter error state due to error: %v", sm.CurrentJob, err)
|
||||
sm.EnterState(models.JobError)
|
||||
}
|
||||
}
|
||||
|
||||
// AddTransition adds a transition to the transition table of the state machine; the handler is the handler of the target state "to"
|
||||
func (sm *SM) AddTransition(from string, to string, h StateHandler) {
|
||||
_, ok := sm.Transitions[from]
|
||||
if !ok {
|
||||
sm.Transitions[from] = make(map[string]struct{})
|
||||
}
|
||||
sm.Transitions[from][to] = struct{}{}
|
||||
sm.Handlers[to] = h
|
||||
}
|
||||
|
||||
// RemoveTransition removes a transition from transition table of the state machine
|
||||
func (sm *SM) RemoveTransition(from string, to string) {
|
||||
_, ok := sm.Transitions[from]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
delete(sm.Transitions[from], to)
|
||||
}
|
||||
|
||||
// Stop will set the desired state as "stopped" such that when the next transition happens the state machine will stop handling the current job
|
||||
// and the worker can release itself to the workerpool.
|
||||
func (sm *SM) Stop(job Job) {
|
||||
log.Debugf("Trying to stop the job: %v", job)
|
||||
sm.lock.Lock()
|
||||
defer sm.lock.Unlock()
|
||||
//need to check if the sm switched to other job
|
||||
if job.ID() == sm.CurrentJob.ID() && job.Type() == sm.CurrentJob.Type() {
|
||||
sm.desiredState = models.JobStopped
|
||||
log.Debugf("Desired state of job %v is set to stopped", job)
|
||||
} else {
|
||||
log.Debugf("State machine has switched to job %v, so the action to stop job %v will be ignored", sm.CurrentJob, job)
|
||||
}
|
||||
}
|
||||
|
||||
func (sm *SM) getDesiredState() string {
|
||||
sm.lock.Lock()
|
||||
defer sm.lock.Unlock()
|
||||
return sm.desiredState
|
||||
}
|
||||
|
||||
func (sm *SM) setDesiredState(s string) {
|
||||
sm.lock.Lock()
|
||||
defer sm.lock.Unlock()
|
||||
sm.desiredState = s
|
||||
}
|
||||
|
||||
// Init initializes the state machine; it will be called once in the lifecycle of the state machine.
|
||||
func (sm *SM) Init() {
|
||||
sm.lock = &sync.Mutex{}
|
||||
sm.Handlers = make(map[string]StateHandler)
|
||||
sm.Transitions = make(map[string]map[string]struct{})
|
||||
sm.ForcedStates = map[string]struct{}{
|
||||
models.JobError: struct{}{},
|
||||
models.JobStopped: struct{}{},
|
||||
models.JobCanceled: struct{}{},
|
||||
models.JobRetrying: struct{}{},
|
||||
}
|
||||
}
|
||||
|
||||
// Reset resets the state machine and after prereq checking, it will start handling the job.
|
||||
func (sm *SM) Reset(j Job) error {
|
||||
//To ensure the Job visible to the thread to stop the SM
|
||||
sm.lock.Lock()
|
||||
sm.CurrentJob = j
|
||||
sm.desiredState = ""
|
||||
sm.lock.Unlock()
|
||||
|
||||
var err error
|
||||
sm.Logger, err = NewLogger(j)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//init states handlers
|
||||
sm.Handlers = make(map[string]StateHandler)
|
||||
sm.Transitions = make(map[string]map[string]struct{})
|
||||
sm.CurrentState = models.JobPending
|
||||
|
||||
sm.AddTransition(models.JobPending, models.JobRunning, StatusUpdater{sm.CurrentJob, models.JobRunning})
|
||||
sm.AddTransition(models.JobRetrying, models.JobRunning, StatusUpdater{sm.CurrentJob, models.JobRunning})
|
||||
sm.Handlers[models.JobError] = StatusUpdater{sm.CurrentJob, models.JobError}
|
||||
sm.Handlers[models.JobStopped] = StatusUpdater{sm.CurrentJob, models.JobStopped}
|
||||
sm.Handlers[models.JobCanceled] = StatusUpdater{sm.CurrentJob, models.JobCanceled}
|
||||
sm.Handlers[models.JobRetrying] = Retry{sm.CurrentJob}
|
||||
if err := sm.CurrentJob.Init(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := sm.initTransitions(); err != nil {
|
||||
return err
|
||||
}
|
||||
return sm.kickOff()
|
||||
}
|
||||
|
||||
func (sm *SM) kickOff() error {
|
||||
log.Debugf("In kickOff: will start job: %v", sm.CurrentJob)
|
||||
sm.Start(models.JobRunning)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sm *SM) initTransitions() error {
|
||||
switch sm.CurrentJob.Type() {
|
||||
case ReplicationType:
|
||||
repJob, ok := sm.CurrentJob.(*RepJob)
|
||||
if !ok {
|
||||
//Shouldn't be here.
|
||||
return fmt.Errorf("The job: %v is not a type of RepJob", sm.CurrentJob)
|
||||
}
|
||||
jobParm := repJob.parm
|
||||
if jobParm.Operation == models.RepOpTransfer {
|
||||
addImgTransferTransition(sm, jobParm)
|
||||
} else if jobParm.Operation == models.RepOpDelete {
|
||||
addImgDeleteTransition(sm, jobParm)
|
||||
} else {
|
||||
return fmt.Errorf("unsupported operation: %s", jobParm.Operation)
|
||||
}
|
||||
case ScanType:
|
||||
scanJob, ok := sm.CurrentJob.(*ScanJob)
|
||||
if !ok {
|
||||
//Shouldn't be here.
|
||||
return fmt.Errorf("The job: %v is not a type of ScanJob", sm.CurrentJob)
|
||||
}
|
||||
addImgScanTransition(sm, scanJob.parm)
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("Unsupported job type: %v", sm.CurrentJob.Type())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
//for testing only
|
||||
/*
|
||||
func addTestTransition(sm *SM) error {
|
||||
sm.AddTransition(models.JobRunning, "pull-img", ImgPuller{img: sm.Parms.Repository, logger: sm.Logger})
|
||||
return nil
|
||||
}
|
||||
*/
|
||||
|
||||
func addImgScanTransition(sm *SM, parm *ScanJobParm) {
|
||||
ctx := &scan.JobContext{
|
||||
Repository: parm.Repository,
|
||||
Tag: parm.Tag,
|
||||
Digest: parm.Digest,
|
||||
JobID: sm.CurrentJob.ID(),
|
||||
Logger: sm.Logger,
|
||||
}
|
||||
|
||||
layerScanHandler := &scan.LayerScanHandler{Context: ctx}
|
||||
sm.AddTransition(models.JobRunning, scan.StateInitialize, &scan.Initializer{Context: ctx})
|
||||
sm.AddTransition(scan.StateInitialize, scan.StateScanLayer, layerScanHandler)
|
||||
sm.AddTransition(scan.StateScanLayer, scan.StateScanLayer, layerScanHandler)
|
||||
sm.AddTransition(scan.StateScanLayer, scan.StateSummarize, &scan.SummarizeHandler{Context: ctx})
|
||||
sm.AddTransition(scan.StateSummarize, models.JobFinished, &StatusUpdater{sm.CurrentJob, models.JobFinished})
|
||||
}
|
||||
|
||||
func addImgTransferTransition(sm *SM, parm *RepJobParm) {
|
||||
base := replication.InitBaseHandler(parm.Repository, parm.LocalRegURL, config.JobserviceSecret(),
|
||||
parm.TargetURL, parm.TargetUsername, parm.TargetPassword,
|
||||
parm.Insecure, parm.Tags, sm.Logger)
|
||||
|
||||
sm.AddTransition(models.JobRunning, replication.StateInitialize, &replication.Initializer{BaseHandler: base})
|
||||
sm.AddTransition(replication.StateInitialize, replication.StateCheck, &replication.Checker{BaseHandler: base})
|
||||
sm.AddTransition(replication.StateCheck, replication.StatePullManifest, &replication.ManifestPuller{BaseHandler: base})
|
||||
sm.AddTransition(replication.StatePullManifest, replication.StateTransferBlob, &replication.BlobTransfer{BaseHandler: base})
|
||||
sm.AddTransition(replication.StatePullManifest, models.JobFinished, &StatusUpdater{sm.CurrentJob, models.JobFinished})
|
||||
sm.AddTransition(replication.StateTransferBlob, replication.StatePushManifest, &replication.ManifestPusher{BaseHandler: base})
|
||||
sm.AddTransition(replication.StatePushManifest, replication.StatePullManifest, &replication.ManifestPuller{BaseHandler: base})
|
||||
}
|
||||
|
||||
func addImgDeleteTransition(sm *SM, parm *RepJobParm) {
|
||||
deleter := replication.NewDeleter(parm.Repository, parm.Tags, parm.TargetURL,
|
||||
parm.TargetUsername, parm.TargetPassword, parm.Insecure, sm.Logger)
|
||||
|
||||
sm.AddTransition(models.JobRunning, replication.StateDelete, deleter)
|
||||
sm.AddTransition(replication.StateDelete, models.JobFinished, &StatusUpdater{sm.CurrentJob, models.JobFinished})
|
||||
}
|
@ -1,185 +0,0 @@
|
||||
// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package job
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/vmware/harbor/src/common/models"
|
||||
"github.com/vmware/harbor/src/common/utils/log"
|
||||
"github.com/vmware/harbor/src/jobservice/config"
|
||||
)
|
||||
|
||||
// workerPool is a set of workers; each worker is associated with a statemachine for handling jobs.
|
||||
// it consists of a channel of free workers and a list of all workers
|
||||
type workerPool struct {
|
||||
poolType Type
|
||||
workerChan chan *Worker
|
||||
workerList []*Worker
|
||||
}
|
||||
|
||||
// WorkerPools is a map that contains workerpools for different types of jobs.
|
||||
var WorkerPools map[Type]*workerPool
|
||||
|
||||
// For WorkerPools initialization.
|
||||
var once sync.Once
|
||||
|
||||
//TODO: remove the hard code?
|
||||
const maxScanWorker = 3
|
||||
|
||||
// StopJobs accepts a list of jobs and will try to stop them if any of them is being executed by the worker.
|
||||
func (wp *workerPool) StopJobs(jobs []Job) {
|
||||
log.Debugf("Workers working on jobs: %v will be stopped", jobs)
|
||||
for _, j := range jobs {
|
||||
for _, w := range wp.workerList {
|
||||
if w.SM.CurrentJob.ID() == j.ID() {
|
||||
log.Debugf("found a worker whose job ID is %d, type: %v, will try to stop it", j.ID(), j.Type())
|
||||
w.SM.Stop(j)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Worker consists of a channel for job from which worker gets the next job to handle, and a pointer to a statemachine,
|
||||
// the actual work to handle the job is done via state machine.
|
||||
type Worker struct {
|
||||
ID int
|
||||
Type Type
|
||||
Jobs chan Job
|
||||
queue chan *Worker
|
||||
SM *SM
|
||||
quit chan bool
|
||||
}
|
||||
|
||||
// String ...
|
||||
func (w *Worker) String() string {
|
||||
return fmt.Sprintf("{ID: %d, Type: %v}", w.ID, w.Type)
|
||||
}
|
||||
|
||||
// Start is a loop in which the worker gets a job from its channel and handles it.
|
||||
func (w *Worker) Start() {
|
||||
go func() {
|
||||
for {
|
||||
w.queue <- w
|
||||
select {
|
||||
case job := <-w.Jobs:
|
||||
log.Debugf("worker: %v, will handle job: %v", w, job)
|
||||
w.handle(job)
|
||||
case q := <-w.quit:
|
||||
if q {
|
||||
log.Debugf("worker: %v, will stop.", w)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Stop ...
|
||||
func (w *Worker) Stop() {
|
||||
go func() {
|
||||
w.quit <- true
|
||||
}()
|
||||
}
|
||||
|
||||
func (w *Worker) handle(job Job) {
|
||||
err := w.SM.Reset(job)
|
||||
if err != nil {
|
||||
log.Errorf("Worker %v, failed to re-initialize statemachine for job: %v, error: %v", w, job, err)
|
||||
err2 := job.UpdateStatus(models.JobError)
|
||||
if err2 != nil {
|
||||
log.Errorf("Failed to update job status to ERROR, job: %v, error:%v", job, err2)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NewWorker returns a pointer to new instance of worker
|
||||
func NewWorker(id int, t Type, wp *workerPool) *Worker {
|
||||
w := &Worker{
|
||||
ID: id,
|
||||
Type: t,
|
||||
Jobs: make(chan Job),
|
||||
quit: make(chan bool),
|
||||
queue: wp.workerChan,
|
||||
SM: &SM{},
|
||||
}
|
||||
w.SM.Init()
|
||||
return w
|
||||
}
|
||||
|
||||
// InitWorkerPools creates worker pools for different types of jobs.
|
||||
func InitWorkerPools() error {
|
||||
maxRepWorker, err := config.MaxJobWorkers()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
once.Do(func() {
|
||||
WorkerPools = make(map[Type]*workerPool)
|
||||
WorkerPools[ReplicationType] = createWorkerPool(maxRepWorker, ReplicationType)
|
||||
WorkerPools[ScanType] = createWorkerPool(maxScanWorker, ScanType)
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
//createWorkerPool creates workers according to parm
|
||||
func createWorkerPool(n int, t Type) *workerPool {
|
||||
wp := &workerPool{
|
||||
workerChan: make(chan *Worker, n),
|
||||
workerList: make([]*Worker, 0, n),
|
||||
}
|
||||
for i := 0; i < n; i++ {
|
||||
worker := NewWorker(i, t, wp)
|
||||
wp.workerList = append(wp.workerList, worker)
|
||||
worker.Start()
|
||||
log.Debugf("worker %v started", worker)
|
||||
}
|
||||
return wp
|
||||
}
|
||||
|
||||
// Dispatch will listen to the jobQueue of job service and try to pick a free worker from the worker pool and assign the job to it.
|
||||
func Dispatch() {
|
||||
for {
|
||||
select {
|
||||
case job := <-jobQueue:
|
||||
go func(job Job) {
|
||||
jobID := job.ID()
|
||||
jobType := strings.ToLower(job.Type().String())
|
||||
log.Debugf("trying to dispatch %s job %d ...", jobType, jobID)
|
||||
worker := <-WorkerPools[job.Type()].workerChan
|
||||
|
||||
status, err := job.GetStatus()
|
||||
if err != nil {
|
||||
// put the work back to the worker pool
|
||||
worker.queue <- worker
|
||||
log.Errorf("failed to get status of %s job %d: %v", jobType, jobID, err)
|
||||
return
|
||||
}
|
||||
|
||||
// check the status of job before dispatching
|
||||
if status == models.JobStopped {
|
||||
// put the work back to the worker pool
|
||||
worker.queue <- worker
|
||||
log.Debugf("%s job %d is stopped, skip dispatching", jobType, jobID)
|
||||
return
|
||||
}
|
||||
|
||||
worker.Jobs <- job
|
||||
log.Debugf("%s job %d dispatched successfully", jobType, jobID)
|
||||
}(job)
|
||||
}
|
||||
}
|
||||
}
|
@ -1,21 +0,0 @@
|
||||
// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
package main
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
}
|
@ -1,89 +1,59 @@
|
||||
// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
|
||||
"github.com/astaxie/beego"
|
||||
"github.com/vmware/harbor/src/common/dao"
|
||||
"github.com/vmware/harbor/src/common/models"
|
||||
"github.com/vmware/harbor/src/common/utils/log"
|
||||
"github.com/vmware/harbor/src/adminserver/client"
|
||||
"github.com/vmware/harbor/src/jobservice/config"
|
||||
"github.com/vmware/harbor/src/jobservice/job"
|
||||
"github.com/vmware/harbor/src/jobservice/env"
|
||||
"github.com/vmware/harbor/src/jobservice/job/impl"
|
||||
ilogger "github.com/vmware/harbor/src/jobservice/job/impl/logger"
|
||||
"github.com/vmware/harbor/src/jobservice/logger"
|
||||
"github.com/vmware/harbor/src/jobservice/runtime"
|
||||
"github.com/vmware/harbor/src/jobservice/utils"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.Info("initializing configurations...")
|
||||
if err := config.Init(); err != nil {
|
||||
log.Fatalf("failed to initialize configurations: %v", err)
|
||||
}
|
||||
log.Info("configurations initialization completed")
|
||||
//Get parameters
|
||||
configPath := flag.String("c", "", "Specify the yaml config file path")
|
||||
flag.Parse()
|
||||
|
||||
database, err := config.Database()
|
||||
if err != nil {
|
||||
log.Fatalf("failed to get database configurations: %v", err)
|
||||
//Missing config file
|
||||
if configPath == nil || utils.IsEmptyStr(*configPath) {
|
||||
fmt.Println("Config file should be specified")
|
||||
flag.Usage()
|
||||
return
|
||||
}
|
||||
|
||||
if err := dao.InitDatabase(database); err != nil {
|
||||
log.Fatalf("failed to initialize database: %v", err)
|
||||
//Load configurations
|
||||
if err := config.DefaultConfig.Load(*configPath, true); err != nil {
|
||||
fmt.Printf("Failed to load configurations with error: %s\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
initRouters()
|
||||
if err := job.InitWorkerPools(); err != nil {
|
||||
log.Fatalf("Failed to initialize worker pools, error: %v", err)
|
||||
}
|
||||
go job.Dispatch()
|
||||
resumeJobs()
|
||||
beego.Run()
|
||||
}
|
||||
|
||||
func resumeJobs() {
|
||||
log.Debugf("Trying to resume halted jobs...")
|
||||
err := dao.ResetRunningJobs()
|
||||
if err != nil {
|
||||
log.Warningf("Failed to reset all running jobs to pending, error: %v", err)
|
||||
}
|
||||
rjobs, err := dao.GetRepJobByStatus(models.JobPending, models.JobRetrying, models.JobRunning)
|
||||
if err == nil {
|
||||
for _, j := range rjobs {
|
||||
rj := job.NewRepJob(j.ID)
|
||||
log.Debugf("Resuming replication job: %v", rj)
|
||||
job.Schedule(rj)
|
||||
//Set job context initializer
|
||||
runtime.JobService.SetJobContextInitializer(func(ctx *env.Context) (env.JobContext, error) {
|
||||
secret := config.GetAuthSecret()
|
||||
if utils.IsEmptyStr(secret) {
|
||||
return nil, errors.New("empty auth secret")
|
||||
}
|
||||
} else {
|
||||
log.Warningf("Failed to resume replication jobs, error: %v", err)
|
||||
}
|
||||
sjobs, err := dao.GetScanJobsByStatus(models.JobPending, models.JobRetrying, models.JobRunning)
|
||||
if err == nil {
|
||||
for _, j := range sjobs {
|
||||
sj := job.NewScanJob(j.ID)
|
||||
log.Debugf("Resuming scan job: %v", sj)
|
||||
job.Schedule(sj)
|
||||
}
|
||||
} else {
|
||||
log.Warningf("Failed to resume scan jobs, error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
configPath := os.Getenv("CONFIG_PATH")
|
||||
if len(configPath) != 0 {
|
||||
log.Infof("Config path: %s", configPath)
|
||||
if err := beego.LoadAppConfig("ini", configPath); err != nil {
|
||||
log.Fatalf("Failed to load config file: %s, error: %v", configPath, err)
|
||||
adminClient := client.NewClient(config.GetAdminServerEndpoint(), &client.Config{Secret: secret})
|
||||
jobCtx := impl.NewContext(ctx.SystemContext, adminClient)
|
||||
|
||||
if err := jobCtx.Init(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return jobCtx, nil
|
||||
})
|
||||
|
||||
//New logger for job service
|
||||
sLogger := ilogger.NewServiceLogger(config.GetLogLevel())
|
||||
logger.SetLogger(sLogger)
|
||||
|
||||
//Start
|
||||
runtime.JobService.LoadAndRun()
|
||||
}
|
||||
|
@ -13,8 +13,8 @@ import (
"strings"
"time"

"github.com/vmware/harbor/src/jobservice_v2/models"
"github.com/vmware/harbor/src/jobservice_v2/utils"
"github.com/vmware/harbor/src/jobservice/models"
"github.com/vmware/harbor/src/jobservice/utils"
)

const (
@ -7,7 +7,7 @@ import (
"net/http/httptest"
"testing"

"github.com/vmware/harbor/src/jobservice_v2/models"
"github.com/vmware/harbor/src/jobservice/models"
)

func TestHookClient(t *testing.T) {
@ -5,7 +5,7 @@ package opm
import (
"sync"

"github.com/vmware/harbor/src/jobservice_v2/utils"
"github.com/vmware/harbor/src/jobservice/utils"
)

//HookStore is used to cache the hooks in memory.
@ -2,7 +2,7 @@

package opm

import "github.com/vmware/harbor/src/jobservice_v2/models"
import "github.com/vmware/harbor/src/jobservice/models"

//JobStatsManager defines the methods to handle stats of job.
type JobStatsManager interface {
@ -13,13 +13,13 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/vmware/harbor/src/jobservice_v2/errs"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/logger"
|
||||
"github.com/vmware/harbor/src/jobservice/errs"
|
||||
"github.com/vmware/harbor/src/jobservice/logger"
|
||||
|
||||
"github.com/garyburd/redigo/redis"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/job"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/models"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/utils"
|
||||
"github.com/vmware/harbor/src/jobservice/job"
|
||||
"github.com/vmware/harbor/src/jobservice/models"
|
||||
"github.com/vmware/harbor/src/jobservice/utils"
|
||||
)
|
||||
|
||||
const (
|
@ -13,9 +13,9 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/garyburd/redigo/redis"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/job"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/models"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/utils"
|
||||
"github.com/vmware/harbor/src/jobservice/job"
|
||||
"github.com/vmware/harbor/src/jobservice/models"
|
||||
"github.com/vmware/harbor/src/jobservice/utils"
|
||||
)
|
||||
|
||||
const (
|
@ -9,9 +9,9 @@ import (
|
||||
"github.com/garyburd/redigo/redis"
|
||||
"github.com/gocraft/work"
|
||||
"github.com/robfig/cron"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/job"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/logger"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/utils"
|
||||
"github.com/vmware/harbor/src/jobservice/job"
|
||||
"github.com/vmware/harbor/src/jobservice/logger"
|
||||
"github.com/vmware/harbor/src/jobservice/utils"
|
||||
)
|
||||
|
||||
const (
|
@ -6,8 +6,8 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/vmware/harbor/src/jobservice_v2/tests"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/utils"
|
||||
"github.com/vmware/harbor/src/jobservice/tests"
|
||||
"github.com/vmware/harbor/src/jobservice/utils"
|
||||
)
|
||||
|
||||
func TestPeriodicEnqueuerStartStop(t *testing.T) {
|
@ -2,7 +2,7 @@
|
||||
|
||||
package period
|
||||
|
||||
import "github.com/vmware/harbor/src/jobservice_v2/models"
|
||||
import "github.com/vmware/harbor/src/jobservice/models"
|
||||
|
||||
//Interface defines operations the periodic scheduler should have.
|
||||
type Interface interface {
|
@ -6,7 +6,7 @@ import (
|
||||
"encoding/json"
|
||||
"sync"
|
||||
|
||||
"github.com/vmware/harbor/src/jobservice_v2/utils"
|
||||
"github.com/vmware/harbor/src/jobservice/utils"
|
||||
)
|
||||
|
||||
const (
|
@ -12,10 +12,10 @@ import (
|
||||
"github.com/robfig/cron"
|
||||
|
||||
"github.com/garyburd/redigo/redis"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/env"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/logger"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/models"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/utils"
|
||||
"github.com/vmware/harbor/src/jobservice/env"
|
||||
"github.com/vmware/harbor/src/jobservice/logger"
|
||||
"github.com/vmware/harbor/src/jobservice/models"
|
||||
"github.com/vmware/harbor/src/jobservice/utils"
|
||||
)
|
||||
|
||||
const (
|
@ -7,9 +7,9 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/vmware/harbor/src/jobservice_v2/env"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/tests"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/utils"
|
||||
"github.com/vmware/harbor/src/jobservice/env"
|
||||
"github.com/vmware/harbor/src/jobservice/tests"
|
||||
"github.com/vmware/harbor/src/jobservice/utils"
|
||||
)
|
||||
|
||||
var redisPool = tests.GiveMeRedisPool()
|
@ -9,8 +9,8 @@ import (
|
||||
"github.com/gocraft/work"
|
||||
|
||||
"github.com/garyburd/redigo/redis"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/logger"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/utils"
|
||||
"github.com/vmware/harbor/src/jobservice/logger"
|
||||
"github.com/vmware/harbor/src/jobservice/utils"
|
||||
)
|
||||
|
||||
//Sweeper takes charge of clearing the outdated data such as scheduled jobs etc.
|
@ -8,8 +8,8 @@ import (
|
||||
|
||||
"github.com/gocraft/work"
|
||||
|
||||
"github.com/vmware/harbor/src/jobservice_v2/tests"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/utils"
|
||||
"github.com/vmware/harbor/src/jobservice/tests"
|
||||
"github.com/vmware/harbor/src/jobservice/utils"
|
||||
)
|
||||
|
||||
func TestSweeper(t *testing.T) {
|
@ -2,7 +2,7 @@
|
||||
|
||||
package pool
|
||||
|
||||
import "github.com/vmware/harbor/src/jobservice_v2/models"
|
||||
import "github.com/vmware/harbor/src/jobservice/models"
|
||||
|
||||
//Interface for worker pool.
|
||||
//More like a driver that makes the lower queue transparent.
|
@ -9,13 +9,13 @@ import (
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/vmware/harbor/src/jobservice_v2/logger"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/opm"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/period"
|
||||
"github.com/vmware/harbor/src/jobservice/logger"
|
||||
"github.com/vmware/harbor/src/jobservice/opm"
|
||||
"github.com/vmware/harbor/src/jobservice/period"
|
||||
|
||||
"github.com/garyburd/redigo/redis"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/models"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/utils"
|
||||
"github.com/vmware/harbor/src/jobservice/models"
|
||||
"github.com/vmware/harbor/src/jobservice/utils"
|
||||
)
|
||||
|
||||
//MessageServer implements the sub/pub mechanism via redis to do async message exchanging.
|
@ -8,13 +8,13 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/vmware/harbor/src/jobservice_v2/opm"
|
||||
"github.com/vmware/harbor/src/jobservice/opm"
|
||||
|
||||
"github.com/vmware/harbor/src/jobservice_v2/models"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/period"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/utils"
|
||||
"github.com/vmware/harbor/src/jobservice/models"
|
||||
"github.com/vmware/harbor/src/jobservice/period"
|
||||
"github.com/vmware/harbor/src/jobservice/utils"
|
||||
|
||||
"github.com/vmware/harbor/src/jobservice_v2/tests"
|
||||
"github.com/vmware/harbor/src/jobservice/tests"
|
||||
)
|
||||
|
||||
var redisPool = tests.GiveMeRedisPool()
|
@ -7,11 +7,11 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/gocraft/work"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/env"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/errs"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/job"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/logger"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/opm"
|
||||
"github.com/vmware/harbor/src/jobservice/env"
|
||||
"github.com/vmware/harbor/src/jobservice/errs"
|
||||
"github.com/vmware/harbor/src/jobservice/job"
|
||||
"github.com/vmware/harbor/src/jobservice/logger"
|
||||
"github.com/vmware/harbor/src/jobservice/opm"
|
||||
)
|
||||
|
||||
//RedisJob is a job wrapper to wrap the job.Interface to the style which can be recognized by the redis pool.
|
@ -10,13 +10,13 @@ import (
|
||||
"github.com/garyburd/redigo/redis"
|
||||
"github.com/gocraft/work"
|
||||
"github.com/robfig/cron"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/env"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/job"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/logger"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/models"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/opm"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/period"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/utils"
|
||||
"github.com/vmware/harbor/src/jobservice/env"
|
||||
"github.com/vmware/harbor/src/jobservice/job"
|
||||
"github.com/vmware/harbor/src/jobservice/logger"
|
||||
"github.com/vmware/harbor/src/jobservice/models"
|
||||
"github.com/vmware/harbor/src/jobservice/opm"
|
||||
"github.com/vmware/harbor/src/jobservice/period"
|
||||
"github.com/vmware/harbor/src/jobservice/utils"
|
||||
)
|
||||
|
||||
var (
|
@ -9,14 +9,14 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/vmware/harbor/src/jobservice_v2/errs"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/job"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/logger"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/opm"
|
||||
"github.com/vmware/harbor/src/jobservice/errs"
|
||||
"github.com/vmware/harbor/src/jobservice/job"
|
||||
"github.com/vmware/harbor/src/jobservice/logger"
|
||||
"github.com/vmware/harbor/src/jobservice/opm"
|
||||
|
||||
"github.com/vmware/harbor/src/jobservice_v2/tests"
|
||||
"github.com/vmware/harbor/src/jobservice/tests"
|
||||
|
||||
"github.com/vmware/harbor/src/jobservice_v2/env"
|
||||
"github.com/vmware/harbor/src/jobservice/env"
|
||||
)
|
||||
|
||||
var rPool = tests.GiveMeRedisPool()
|
@ -5,7 +5,7 @@ package pool
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
"github.com/vmware/harbor/src/jobservice_v2/job"
|
||||
"github.com/vmware/harbor/src/jobservice/job"
|
||||
)
|
||||
|
||||
//Wrap returns a new job.Interface based on the wrapped job handler reference.
|
@ -1,200 +0,0 @@
|
||||
// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package replication
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/vmware/harbor/src/common/models"
|
||||
"github.com/vmware/harbor/src/common/utils/log"
|
||||
//"github.com/vmware/harbor/src/common/utils/registry"
|
||||
//"github.com/vmware/harbor/src/common/utils/registry/auth"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
// StateDelete ...
|
||||
StateDelete = "delete"
|
||||
)
|
||||
|
||||
var (
|
||||
errNotFound = errors.New("Not Found")
|
||||
)
|
||||
|
||||
// Deleter deletes repository or tags
|
||||
type Deleter struct {
|
||||
repository string // project_name/repo_name
|
||||
tags []string
|
||||
|
||||
dstURL string // url of target registry
|
||||
dstUsr string // username ...
|
||||
dstPwd string // password ...
|
||||
|
||||
insecure bool
|
||||
|
||||
//dstClient *registry.Repository
|
||||
|
||||
logger *log.Logger
|
||||
}
|
||||
|
||||
// NewDeleter returns a Deleter
|
||||
func NewDeleter(repository string, tags []string, dstURL, dstUsr, dstPwd string, insecure bool, logger *log.Logger) *Deleter {
|
||||
deleter := &Deleter{
|
||||
repository: repository,
|
||||
tags: tags,
|
||||
dstURL: dstURL,
|
||||
dstUsr: dstUsr,
|
||||
dstPwd: dstPwd,
|
||||
insecure: insecure,
|
||||
logger: logger,
|
||||
}
|
||||
deleter.logger.Infof("initialization completed: repository: %s, tags: %v, destination URL: %s, insecure: %v, destination user: %s",
|
||||
deleter.repository, deleter.tags, deleter.dstURL, deleter.insecure, deleter.dstUsr)
|
||||
return deleter
|
||||
}
|
||||
|
||||
// Exit ...
|
||||
func (d *Deleter) Exit() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Enter deletes repository or tags
|
||||
func (d *Deleter) Enter() (string, error) {
|
||||
state, err := d.enter()
|
||||
if err != nil && retry(err) {
|
||||
d.logger.Info("waiting for retrying...")
|
||||
return models.JobRetrying, nil
|
||||
}
|
||||
|
||||
return state, err
|
||||
}
|
||||
|
||||
func (d *Deleter) enter() (string, error) {
|
||||
url := strings.TrimRight(d.dstURL, "/") + "/api/repositories/"
|
||||
|
||||
// delete repository
|
||||
if len(d.tags) == 0 {
|
||||
u := url + d.repository
|
||||
if err := del(u, d.dstUsr, d.dstPwd, d.insecure); err != nil {
|
||||
if err == errNotFound {
|
||||
d.logger.Warningf("repository %s does not exist on %s", d.repository, d.dstURL)
|
||||
return models.JobFinished, nil
|
||||
}
|
||||
d.logger.Errorf("an error occurred while deleting repository %s on %s with user %s: %v", d.repository, d.dstURL, d.dstUsr, err)
|
||||
return "", err
|
||||
|
||||
}
|
||||
|
||||
d.logger.Infof("repository %s on %s has been deleted", d.repository, d.dstURL)
|
||||
|
||||
return models.JobFinished, nil
|
||||
|
||||
}
|
||||
|
||||
// delete tags
|
||||
for _, tag := range d.tags {
|
||||
u := url + d.repository + "/tags/" + tag
|
||||
if err := del(u, d.dstUsr, d.dstPwd, d.insecure); err != nil {
|
||||
if err == errNotFound {
|
||||
d.logger.Warningf("repository %s does not exist on %s", d.repository, d.dstURL)
|
||||
continue
|
||||
}
|
||||
|
||||
d.logger.Errorf("an error occurred while deleting repository %s:%s on %s with user %s: %v", d.repository, tag, d.dstURL, d.dstUsr, err)
|
||||
return "", err
|
||||
}
|
||||
d.logger.Infof("repository %s:%s on %s has been deleted", d.repository, tag, d.dstURL)
|
||||
}
|
||||
return models.JobFinished, nil
|
||||
|
||||
/*
|
||||
// the following code can be used for non-harbor repository deletion
|
||||
dstCred := auth.NewBasicAuthCredential(d.dstUsr, d.dstPwd)
|
||||
dstClient, err := newRepositoryClient(d.dstURL, d.insecure, dstCred,
|
||||
d.repository, "repository", d.repository, "pull", "push", "*")
|
||||
if err != nil {
|
||||
d.logger.Errorf("an error occurred while creating destination repository client: %v", err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
d.dstClient = dstClient
|
||||
|
||||
if len(d.tags) == 0 {
|
||||
tags, err := d.dstClient.ListTag()
|
||||
if err != nil {
|
||||
d.logger.Errorf("an error occurred while listing tags of repository %s on %s with user %s: %v", d.repository, d.dstURL, d.dstUsr, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
d.tags = append(d.tags, tags...)
|
||||
}
|
||||
|
||||
d.logger.Infof("tags %v will be deleted", d.tags)
|
||||
|
||||
for _, tag := range d.tags {
|
||||
|
||||
if err := d.dstClient.DeleteTag(tag); err != nil {
|
||||
d.logger.Errorf("an error occurred while deleting repository %s:%s on %s with user %s: %v", d.repository, tag, d.dstURL, d.dstUsr, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
d.logger.Infof("repository %s:%s on %s has been deleted", d.repository, tag, d.dstURL)
|
||||
}
|
||||
|
||||
return models.JobFinished, nil
|
||||
*/
|
||||
}
|
||||
|
||||
func del(url, username, password string, insecure bool) error {
|
||||
req, err := http.NewRequest("DELETE", url, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
req.SetBasicAuth(username, password)
|
||||
|
||||
client := &http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{
|
||||
InsecureSkipVerify: insecure,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode == http.StatusOK {
|
||||
return nil
|
||||
}
|
||||
|
||||
if resp.StatusCode == http.StatusNotFound {
|
||||
return errNotFound
|
||||
}
|
||||
|
||||
b, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return fmt.Errorf("%d %s", resp.StatusCode, string(b))
|
||||
}
|
@ -1,38 +0,0 @@
|
||||
// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package replication
|
||||
|
||||
import (
|
||||
"net"
|
||||
)
|
||||
|
||||
func retry(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
return isNetworkErr(err)
|
||||
}
|
||||
|
||||
func isTemporary(err error) bool {
|
||||
if netErr, ok := err.(net.Error); ok {
|
||||
return netErr.Temporary()
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isNetworkErr(err error) bool {
|
||||
_, ok := err.(net.Error)
|
||||
return ok
|
||||
}
|
@ -1,21 +0,0 @@
|
||||
// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
package replication
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
}
|
@ -1,515 +0,0 @@
|
||||
// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package replication
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/manifest/schema1"
|
||||
"github.com/docker/distribution/manifest/schema2"
|
||||
"github.com/vmware/harbor/src/common/models"
|
||||
comutils "github.com/vmware/harbor/src/common/utils"
|
||||
"github.com/vmware/harbor/src/common/utils/log"
|
||||
"github.com/vmware/harbor/src/common/utils/registry"
|
||||
"github.com/vmware/harbor/src/common/utils/registry/auth"
|
||||
"github.com/vmware/harbor/src/jobservice/config"
|
||||
"github.com/vmware/harbor/src/jobservice/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
// StateInitialize ...
|
||||
StateInitialize = "initialize"
|
||||
// StateCheck ...
|
||||
StateCheck = "check"
|
||||
// StatePullManifest ...
|
||||
StatePullManifest = "pull_manifest"
|
||||
// StateTransferBlob ...
|
||||
StateTransferBlob = "transfer_blob"
|
||||
// StatePushManifest ...
|
||||
StatePushManifest = "push_manifest"
|
||||
)
|
||||
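// A sketch of how the handlers below chain these states (derived from the
// Enter() implementations in this file, per tag): initialize -> check ->
// pull_manifest -> transfer_blob -> push_manifest -> pull_manifest, and the
// ManifestPuller returns "finished" once no tag is left to replicate.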
|
||||
var (
|
||||
// ErrConflict represents http 409 error
|
||||
ErrConflict = errors.New("conflict")
|
||||
)
|
||||
|
||||
// BaseHandler holds information shared by other state handlers
|
||||
type BaseHandler struct {
|
||||
project string // project_name
|
||||
repository string // project_name/repo_name
|
||||
tags []string
|
||||
|
||||
srcURL string // url of source registry
|
||||
srcSecret string
|
||||
|
||||
dstURL string // url of target registry
|
||||
dstUsr string // username ...
|
||||
dstPwd string // password ...
|
||||
|
||||
insecure bool // whether skip secure check when using https
|
||||
|
||||
srcClient *registry.Repository
|
||||
dstClient *registry.Repository
|
||||
|
||||
manifest distribution.Manifest // manifest of tags[0]
|
||||
digest string //digest of tags[0]'s manifest
|
||||
blobs []string // blobs need to be transferred for tags[0]
|
||||
|
||||
blobsExistence map[string]bool //key: digest of blob, value: existence
|
||||
|
||||
logger *log.Logger
|
||||
}
|
||||
|
||||
// InitBaseHandler initializes a BaseHandler.
|
||||
func InitBaseHandler(repository, srcURL, srcSecret,
|
||||
dstURL, dstUsr, dstPwd string, insecure bool, tags []string, logger *log.Logger) *BaseHandler {
|
||||
|
||||
base := &BaseHandler{
|
||||
repository: repository,
|
||||
tags: tags,
|
||||
srcURL: srcURL,
|
||||
srcSecret: srcSecret,
|
||||
dstURL: dstURL,
|
||||
dstUsr: dstUsr,
|
||||
dstPwd: dstPwd,
|
||||
insecure: insecure,
|
||||
blobsExistence: make(map[string]bool, 10),
|
||||
logger: logger,
|
||||
}
|
||||
|
||||
base.project, _ = comutils.ParseRepository(base.repository)
|
||||
|
||||
return base
|
||||
}
|
||||
|
||||
// Exit ...
|
||||
func (b *BaseHandler) Exit() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Initializer creates clients for source and destination registry,
|
||||
// lists tags of the repository if parameter tags is nil.
|
||||
type Initializer struct {
|
||||
*BaseHandler
|
||||
}
|
||||
|
||||
// Enter ...
|
||||
func (i *Initializer) Enter() (string, error) {
|
||||
i.logger.Infof("initializing: repository: %s, tags: %v, source URL: %s, destination URL: %s, insecure: %v, destination user: %s",
|
||||
i.repository, i.tags, i.srcURL, i.dstURL, i.insecure, i.dstUsr)
|
||||
|
||||
state, err := i.enter()
|
||||
if err != nil && retry(err) {
|
||||
i.logger.Info("waiting for retrying...")
|
||||
return models.JobRetrying, nil
|
||||
}
|
||||
|
||||
return state, err
|
||||
}
|
||||
|
||||
func (i *Initializer) enter() (string, error) {
|
||||
c := &http.Cookie{Name: models.UISecretCookie, Value: i.srcSecret}
|
||||
srcCred := auth.NewCookieCredential(c)
|
||||
srcClient, err := utils.NewRepositoryClient(i.srcURL, i.insecure, srcCred,
|
||||
config.InternalTokenServiceEndpoint(), i.repository)
|
||||
if err != nil {
|
||||
i.logger.Errorf("an error occurred while creating source repository client: %v", err)
|
||||
return "", err
|
||||
}
|
||||
i.srcClient = srcClient
|
||||
|
||||
dstCred := auth.NewBasicAuthCredential(i.dstUsr, i.dstPwd)
|
||||
dstClient, err := utils.NewRepositoryClient(i.dstURL, i.insecure, dstCred,
|
||||
"", i.repository)
|
||||
if err != nil {
|
||||
i.logger.Errorf("an error occurred while creating destination repository client: %v", err)
|
||||
return "", err
|
||||
}
|
||||
i.dstClient = dstClient
|
||||
|
||||
if len(i.tags) == 0 {
|
||||
tags, err := i.srcClient.ListTag()
|
||||
if err != nil {
|
||||
i.logger.Errorf("an error occurred while listing tags for source repository: %v", err)
|
||||
return "", err
|
||||
}
|
||||
i.tags = tags
|
||||
}
|
||||
|
||||
i.logger.Infof("initialization completed: project: %s, repository: %s, tags: %v, source URL: %s, destination URL: %s, insecure: %v, destination user: %s",
|
||||
i.project, i.repository, i.tags, i.srcURL, i.dstURL, i.insecure, i.dstUsr)
|
||||
|
||||
return StateCheck, nil
|
||||
}
|
||||
|
||||
// Checker checks the existence of the project and the user's privilege to the project
|
||||
type Checker struct {
|
||||
*BaseHandler
|
||||
}
|
||||
|
||||
// Enter checks the existence of the project; if it does not exist, creates it.
// If it exists, checks whether the user has write privilege to it.
|
||||
func (c *Checker) Enter() (string, error) {
|
||||
state, err := c.enter()
|
||||
if err != nil && retry(err) {
|
||||
c.logger.Info("waiting for retrying...")
|
||||
return models.JobRetrying, nil
|
||||
}
|
||||
|
||||
return state, err
|
||||
}
|
||||
|
||||
func (c *Checker) enter() (string, error) {
|
||||
project, err := getProject(c.project)
|
||||
if err != nil {
|
||||
c.logger.Errorf("failed to get project %s from %s: %v", c.project, c.srcURL, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
err = c.createProject(project)
|
||||
if err == nil {
|
||||
c.logger.Infof("project %s is created on %s with user %s", c.project, c.dstURL, c.dstUsr)
|
||||
return StatePullManifest, nil
|
||||
}
|
||||
|
||||
// other job may be also doing the same thing when the current job
|
||||
// is creating project, so when the response code is 409, continue
|
||||
// to do next step
|
||||
if err == ErrConflict {
|
||||
c.logger.Warningf("the status code is 409 when creating project %s on %s with user %s, try to do next step", c.project, c.dstURL, c.dstUsr)
|
||||
return StatePullManifest, nil
|
||||
}
|
||||
|
||||
c.logger.Errorf("an error occurred while creating project %s on %s with user %s : %v", c.project, c.dstURL, c.dstUsr, err)
|
||||
|
||||
return "", err
|
||||
}
|
||||
|
||||
func getProject(name string) (*models.Project, error) {
|
||||
req, err := http.NewRequest(http.MethodGet, buildProjectURL(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
q := req.URL.Query()
|
||||
q.Set("name", name)
|
||||
req.URL.RawQuery = q.Encode()
|
||||
|
||||
req.AddCookie(&http.Cookie{
|
||||
Name: models.UISecretCookie,
|
||||
Value: config.JobserviceSecret(),
|
||||
})
|
||||
|
||||
client := &http.Client{}
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
data, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("failed to get project %s: %d %s",
|
||||
name, resp.StatusCode, string(data))
|
||||
}
|
||||
|
||||
list := []*models.Project{}
|
||||
if err = json.Unmarshal(data, &list); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var project *models.Project
|
||||
for _, p := range list {
|
||||
if p.Name == name {
|
||||
project = p
|
||||
break
|
||||
}
|
||||
}
|
||||
if project == nil {
|
||||
return nil, fmt.Errorf("project %s not found", name)
|
||||
}
|
||||
|
||||
return project, nil
|
||||
}
|
||||
|
||||
func (c *Checker) createProject(project *models.Project) error {
|
||||
// only replicate the public property of project
|
||||
pro := struct {
|
||||
models.ProjectRequest
|
||||
Public int `json:"public"`
|
||||
}{
|
||||
ProjectRequest: models.ProjectRequest{
|
||||
Name: project.Name,
|
||||
Metadata: map[string]string{
|
||||
models.ProMetaPublic: strconv.FormatBool(project.IsPublic()),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// put "public" property in both metadata and public field to keep compatibility
|
||||
// with old version API(<=1.2.0)
|
||||
if project.IsPublic() {
|
||||
pro.Public = 1
|
||||
}
|
||||
|
||||
data, err := json.Marshal(pro)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
url := strings.TrimRight(c.dstURL, "/") + "/api/projects/"
|
||||
req, err := http.NewRequest("POST", url, bytes.NewReader(data))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
req.SetBasicAuth(c.dstUsr, c.dstPwd)
|
||||
req.Header.Set(http.CanonicalHeaderKey("content-type"), "application/json")
|
||||
|
||||
client := &http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{
|
||||
InsecureSkipVerify: c.insecure,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
|
||||
// version 0.1.1's response code is 200
|
||||
if resp.StatusCode == http.StatusCreated ||
|
||||
resp.StatusCode == http.StatusOK {
|
||||
return nil
|
||||
}
|
||||
|
||||
if resp.StatusCode == http.StatusConflict {
|
||||
return ErrConflict
|
||||
}
|
||||
|
||||
message, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
c.logger.Errorf("an error occurred while reading message from response: %v", err)
|
||||
}
|
||||
|
||||
return fmt.Errorf("failed to create project %s on %s with user %s: %d %s",
|
||||
c.project, c.dstURL, c.dstUsr, resp.StatusCode, string(message))
|
||||
}
|
||||
|
||||
func buildProjectURL() string {
|
||||
return strings.TrimRight(config.LocalUIURL(), "/") + "/api/projects"
|
||||
}
|
||||
|
||||
// ManifestPuller pulls the manifest of a tag. And if no tag needs to be pulled,
|
||||
// the next state that state machine should enter is "finished".
|
||||
type ManifestPuller struct {
|
||||
*BaseHandler
|
||||
}
|
||||
|
||||
// Enter pulls manifest of a tag and checks if all blobs exist in the destination registry
|
||||
func (m *ManifestPuller) Enter() (string, error) {
|
||||
state, err := m.enter()
|
||||
if err != nil && retry(err) {
|
||||
m.logger.Info("waiting for retrying...")
|
||||
return models.JobRetrying, nil
|
||||
}
|
||||
|
||||
return state, err
|
||||
|
||||
}
|
||||
|
||||
func (m *ManifestPuller) enter() (string, error) {
|
||||
if len(m.tags) == 0 {
|
||||
m.logger.Infof("no tag needs to be replicated, next state is \"finished\"")
|
||||
return models.JobFinished, nil
|
||||
}
|
||||
|
||||
name := m.repository
|
||||
tag := m.tags[0]
|
||||
|
||||
acceptMediaTypes := []string{schema1.MediaTypeManifest, schema2.MediaTypeManifest}
|
||||
digest, mediaType, payload, err := m.srcClient.PullManifest(tag, acceptMediaTypes)
|
||||
if err != nil {
|
||||
m.logger.Errorf("an error occurred while pulling manifest of %s:%s from %s: %v", name, tag, m.srcURL, err)
|
||||
return "", err
|
||||
}
|
||||
m.digest = digest
|
||||
m.logger.Infof("manifest of %s:%s pulled successfully from %s: %s", name, tag, m.srcURL, digest)
|
||||
|
||||
if strings.Contains(mediaType, "application/json") {
|
||||
mediaType = schema1.MediaTypeManifest
|
||||
}
|
||||
|
||||
manifest, _, err := registry.UnMarshal(mediaType, payload)
|
||||
if err != nil {
|
||||
m.logger.Errorf("an error occurred while parsing manifest of %s:%s from %s: %v", name, tag, m.srcURL, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
m.manifest = manifest
|
||||
|
||||
// all blobs(layers and config)
|
||||
var blobs []string
|
||||
|
||||
for _, discriptor := range manifest.References() {
|
||||
blobs = append(blobs, discriptor.Digest.String())
|
||||
}
|
||||
|
||||
m.logger.Infof("all blobs of %s:%s from %s: %v", name, tag, m.srcURL, blobs)
|
||||
|
||||
for _, blob := range blobs {
|
||||
exist, ok := m.blobsExistence[blob]
|
||||
if !ok {
|
||||
exist, err = m.dstClient.BlobExist(blob)
|
||||
if err != nil {
|
||||
m.logger.Errorf("an error occurred while checking existence of blob %s of %s:%s on %s: %v", blob, name, tag, m.dstURL, err)
|
||||
return "", err
|
||||
}
|
||||
m.blobsExistence[blob] = exist
|
||||
}
|
||||
|
||||
if !exist {
|
||||
m.blobs = append(m.blobs, blob)
|
||||
} else {
|
||||
m.logger.Infof("blob %s of %s:%s already exists in %s", blob, name, tag, m.dstURL)
|
||||
}
|
||||
}
|
||||
m.logger.Infof("blobs of %s:%s need to be transferred to %s: %v", name, tag, m.dstURL, m.blobs)
|
||||
|
||||
return StateTransferBlob, nil
|
||||
}
|
||||
|
||||
// BlobTransfer transfers blobs of a tag
|
||||
type BlobTransfer struct {
|
||||
*BaseHandler
|
||||
}
|
||||
|
||||
// Enter pulls blobs and then pushes them to the destination registry.
|
||||
func (b *BlobTransfer) Enter() (string, error) {
|
||||
state, err := b.enter()
|
||||
if err != nil && retry(err) {
|
||||
b.logger.Info("waiting for retrying...")
|
||||
return models.JobRetrying, nil
|
||||
}
|
||||
|
||||
return state, err
|
||||
|
||||
}
|
||||
|
||||
func (b *BlobTransfer) enter() (string, error) {
|
||||
name := b.repository
|
||||
tag := b.tags[0]
|
||||
for _, blob := range b.blobs {
|
||||
b.logger.Infof("transferring blob %s of %s:%s to %s ...", blob, name, tag, b.dstURL)
|
||||
size, data, err := b.srcClient.PullBlob(blob)
|
||||
if err != nil {
|
||||
b.logger.Errorf("an error occurred while pulling blob %s of %s:%s from %s: %v", blob, name, tag, b.srcURL, err)
|
||||
return "", err
|
||||
}
|
||||
if data != nil {
|
||||
defer data.Close()
|
||||
}
|
||||
if err = b.dstClient.PushBlob(blob, size, data); err != nil {
|
||||
b.logger.Errorf("an error occurred while pushing blob %s of %s:%s to %s : %v", blob, name, tag, b.dstURL, err)
|
||||
return "", err
|
||||
}
|
||||
b.logger.Infof("blob %s of %s:%s transferred to %s completed", blob, name, tag, b.dstURL)
|
||||
}
|
||||
|
||||
return StatePushManifest, nil
|
||||
}
|
||||
|
||||
// ManifestPusher pushes the manifest to the destination registry
|
||||
type ManifestPusher struct {
|
||||
*BaseHandler
|
||||
}
|
||||
|
||||
// Enter checks the existence of the manifest in the source registry first, and if it
// exists, pushes it to the destination registry. The check is to avoid
// the situation where the tag is deleted while the blobs are being transferred.
|
||||
func (m *ManifestPusher) Enter() (string, error) {
|
||||
state, err := m.enter()
|
||||
if err != nil && retry(err) {
|
||||
m.logger.Info("waiting for retrying...")
|
||||
return models.JobRetrying, nil
|
||||
}
|
||||
|
||||
return state, err
|
||||
|
||||
}
|
||||
|
||||
func (m *ManifestPusher) enter() (string, error) {
|
||||
name := m.repository
|
||||
tag := m.tags[0]
|
||||
_, exist, err := m.srcClient.ManifestExist(tag)
|
||||
if err != nil {
|
||||
m.logger.Infof("an error occurred while checking the existence of manifest of %s:%s on %s: %v", name, tag, m.srcURL, err)
|
||||
return "", err
|
||||
}
|
||||
if !exist {
|
||||
m.logger.Infof("manifest of %s:%s does not exist on source registry %s, cancel manifest pushing", name, tag, m.srcURL)
|
||||
} else {
|
||||
m.logger.Infof("manifest of %s:%s exists on source registry %s, continue manifest pushing", name, tag, m.srcURL)
|
||||
|
||||
digest, manifestExist, err := m.dstClient.ManifestExist(tag)
|
||||
if manifestExist && digest == m.digest {
|
||||
m.logger.Infof("manifest of %s:%s exists on destination registry %s, skip manifest pushing", name, tag, m.dstURL)
|
||||
|
||||
m.tags = m.tags[1:]
|
||||
m.manifest = nil
|
||||
m.digest = ""
|
||||
m.blobs = nil
|
||||
|
||||
return StatePullManifest, nil
|
||||
}
|
||||
|
||||
mediaType, data, err := m.manifest.Payload()
|
||||
if err != nil {
|
||||
m.logger.Errorf("an error occurred while getting payload of manifest for %s:%s : %v", name, tag, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
if _, err = m.dstClient.PushManifest(tag, mediaType, data); err != nil {
|
||||
m.logger.Errorf("an error occurred while pushing manifest of %s:%s to %s : %v", name, tag, m.dstURL, err)
|
||||
return "", err
|
||||
}
|
||||
m.logger.Infof("manifest of %s:%s has been pushed to %s", name, tag, m.dstURL)
|
||||
}
|
||||
|
||||
m.tags = m.tags[1:]
|
||||
m.manifest = nil
|
||||
m.digest = ""
|
||||
m.blobs = nil
|
||||
|
||||
return StatePullManifest, nil
|
||||
}
|
@ -1,29 +0,0 @@
|
||||
// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
api "github.com/vmware/harbor/src/jobservice/api"
|
||||
|
||||
"github.com/astaxie/beego"
|
||||
)
|
||||
|
||||
func initRouters() {
|
||||
beego.Router("/api/jobs/replication", &api.ReplicationJob{})
|
||||
beego.Router("/api/jobs/replication/:id/log", &api.ReplicationJob{}, "get:GetLog")
|
||||
beego.Router("/api/jobs/replication/actions", &api.ReplicationJob{}, "post:HandleAction")
|
||||
beego.Router("/api/jobs/scan", &api.ImageScanJob{})
|
||||
beego.Router("/api/jobs/scan/:id/log", &api.ImageScanJob{}, "get:GetLog")
|
||||
}
|
@ -13,15 +13,15 @@ import (
|
||||
|
||||
"github.com/garyburd/redigo/redis"
|
||||
"github.com/vmware/harbor/src/common/job"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/api"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/config"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/core"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/env"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/job/impl"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/job/impl/replication"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/job/impl/scan"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/logger"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/pool"
|
||||
"github.com/vmware/harbor/src/jobservice/api"
|
||||
"github.com/vmware/harbor/src/jobservice/config"
|
||||
"github.com/vmware/harbor/src/jobservice/core"
|
||||
"github.com/vmware/harbor/src/jobservice/env"
|
||||
"github.com/vmware/harbor/src/jobservice/job/impl"
|
||||
"github.com/vmware/harbor/src/jobservice/job/impl/replication"
|
||||
"github.com/vmware/harbor/src/jobservice/job/impl/scan"
|
||||
"github.com/vmware/harbor/src/jobservice/logger"
|
||||
"github.com/vmware/harbor/src/jobservice/pool"
|
||||
)
|
||||
|
||||
const (
|
@ -1,45 +0,0 @@
|
||||
// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package scan
|
||||
|
||||
import (
|
||||
"github.com/vmware/harbor/src/common/models"
|
||||
"github.com/vmware/harbor/src/common/utils/clair"
|
||||
"github.com/vmware/harbor/src/common/utils/log"
|
||||
)
|
||||
|
||||
const (
|
||||
// StateInitialize in this state the handler will initialize the job context.
|
||||
StateInitialize = "initialize"
|
||||
// StateScanLayer in this state the handler will POST each layer to Clair to scan the image layer by layer.
|
||||
StateScanLayer = "scanlayer"
|
||||
// StateSummarize in this state, after the layers are scanned by Clair, it will call the Clair API to update the vulnerability overview in Harbor's DB. After this state, the job is finished.
|
||||
StateSummarize = "summarize"
|
||||
)
|
||||
|
||||
//JobContext is for sharing data across handlers in an execution of a scan job.
|
||||
type JobContext struct {
|
||||
JobID int64
|
||||
Repository string
|
||||
Tag string
|
||||
Digest string
|
||||
//The array of data object to set as request body for layer scan.
|
||||
layers []models.ClairLayer
|
||||
current int
|
||||
//token for accessing the registry
|
||||
token string
|
||||
clairClient *clair.Client
|
||||
Logger *log.Logger
|
||||
}
|
@ -1,147 +0,0 @@
|
||||
// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package scan
|
||||
|
||||
import (
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/manifest/schema2"
|
||||
"github.com/vmware/harbor/src/common/models"
|
||||
"github.com/vmware/harbor/src/common/utils/clair"
|
||||
"github.com/vmware/harbor/src/jobservice/config"
|
||||
"github.com/vmware/harbor/src/jobservice/utils"
|
||||
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Initializer handles the initialize state: it pulls the manifest and prepares the token.
|
||||
type Initializer struct {
|
||||
Context *JobContext
|
||||
}
|
||||
|
||||
// Enter ...
|
||||
func (iz *Initializer) Enter() (string, error) {
|
||||
logger := iz.Context.Logger
|
||||
logger.Infof("Entered scan initializer")
|
||||
regURL, err := config.LocalRegURL()
|
||||
if err != nil {
|
||||
logger.Errorf("Failed to read regURL, error: %v", err)
|
||||
return "", err
|
||||
}
|
||||
repoClient, err := utils.NewRepositoryClientForJobservice(iz.Context.Repository)
|
||||
if err != nil {
|
||||
logger.Errorf("An error occurred while creating repository client: %v", err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
_, _, payload, err := repoClient.PullManifest(iz.Context.Digest, []string{schema2.MediaTypeManifest})
|
||||
if err != nil {
|
||||
logger.Errorf("Error pulling manifest for image %s:%s :%v", iz.Context.Repository, iz.Context.Tag, err)
|
||||
return "", err
|
||||
}
|
||||
manifest, _, err := distribution.UnmarshalManifest(schema2.MediaTypeManifest, payload)
|
||||
if err != nil {
|
||||
logger.Error("Failed to unMarshal manifest from response")
|
||||
return "", err
|
||||
}
|
||||
|
||||
tk, err := utils.GetTokenForRepo(iz.Context.Repository)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
logger.Infof("Image: %s:%s, digest: %s", iz.Context.Repository, iz.Context.Tag, iz.Context.Digest)
|
||||
iz.Context.token = tk
|
||||
iz.Context.clairClient = clair.NewClient(config.ClairEndpoint(), logger)
|
||||
iz.prepareLayers(regURL, manifest.References())
|
||||
return StateScanLayer, nil
|
||||
}
|
||||
|
||||
func (iz *Initializer) prepareLayers(registryEndpoint string, descriptors []distribution.Descriptor) {
|
||||
tokenHeader := map[string]string{"Connection": "close", "Authorization": fmt.Sprintf("Bearer %s", iz.Context.token)}
|
||||
// form the chain by using the digests of all parent layers in the image, such that if another image is built on top of this image the layer name can be re-used.
|
||||
shaChain := ""
|
||||
for _, d := range descriptors {
|
||||
if d.MediaType == schema2.MediaTypeConfig {
|
||||
continue
|
||||
}
|
||||
shaChain += string(d.Digest) + "-"
|
||||
l := models.ClairLayer{
|
||||
Name: fmt.Sprintf("%x", sha256.Sum256([]byte(shaChain))),
|
||||
Headers: tokenHeader,
|
||||
Format: "Docker",
|
||||
Path: utils.BuildBlobURL(registryEndpoint, iz.Context.Repository, string(d.Digest)),
|
||||
}
|
||||
if len(iz.Context.layers) > 0 {
|
||||
l.ParentName = iz.Context.layers[len(iz.Context.layers)-1].Name
|
||||
}
|
||||
iz.Context.layers = append(iz.Context.layers, l)
|
||||
}
|
||||
}
|
||||
|
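// Illustrative example of the naming scheme above (the digests are hypothetical):
// for layer digests "sha256:aaa" and "sha256:bbb", the generated Clair layer
// names are sha256("sha256:aaa-") and sha256("sha256:aaa-sha256:bbb-"), so an
// image sharing the same parent layers reuses the same layer names.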
||||
// Exit ...
|
||||
func (iz *Initializer) Exit() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
//LayerScanHandler will call clair API to trigger scanning.
|
||||
type LayerScanHandler struct {
|
||||
Context *JobContext
|
||||
}
|
||||
|
||||
// Enter ...
|
||||
func (ls *LayerScanHandler) Enter() (string, error) {
|
||||
logger := ls.Context.Logger
|
||||
currentLayer := ls.Context.layers[ls.Context.current]
|
||||
logger.Infof("Entered scan layer handler, current: %d, layer name: %s, layer path: %s", ls.Context.current, currentLayer.Name, currentLayer.Path)
|
||||
err := ls.Context.clairClient.ScanLayer(currentLayer)
|
||||
if err != nil {
|
||||
logger.Errorf("Unexpected error: %v", err)
|
||||
return "", err
|
||||
}
|
||||
ls.Context.current++
|
||||
if ls.Context.current == len(ls.Context.layers) {
|
||||
return StateSummarize, nil
|
||||
}
|
||||
logger.Infof("After scanning, return with next state: %s", StateScanLayer)
|
||||
return StateScanLayer, nil
|
||||
}
|
||||
|
||||
// Exit ...
|
||||
func (ls *LayerScanHandler) Exit() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// SummarizeHandler will summarize the vulnerability and feature information from Clair, and store it into Harbor's DB.
|
||||
type SummarizeHandler struct {
|
||||
Context *JobContext
|
||||
}
|
||||
|
||||
// Enter ...
|
||||
func (sh *SummarizeHandler) Enter() (string, error) {
|
||||
logger := sh.Context.Logger
|
||||
logger.Infof("Entered summarize handler")
|
||||
layerName := sh.Context.layers[len(sh.Context.layers)-1].Name
|
||||
logger.Infof("Top layer's name: %s, will use it to get the vulnerability result of image", layerName)
|
||||
clairURL := config.ClairEndpoint()
|
||||
if err := clair.UpdateScanOverview(sh.Context.Digest, layerName, clairURL); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return models.JobFinished, nil
|
||||
}
|
||||
|
||||
// Exit ...
|
||||
func (sh *SummarizeHandler) Exit() error {
|
||||
return nil
|
||||
}
|
@ -1,105 +1,87 @@
|
||||
// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
// Copyright 2018 The Harbor Authors. All rights reserved.
|
||||
|
||||
//Package utils provides reusable and sharable utilities for other packages and components.
|
||||
package utils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"errors"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/distribution/registry/auth/token"
|
||||
"github.com/vmware/harbor/src/common/models"
|
||||
"github.com/vmware/harbor/src/common/utils/registry"
|
||||
"github.com/vmware/harbor/src/common/utils/registry/auth"
|
||||
"github.com/vmware/harbor/src/jobservice/config"
|
||||
"github.com/garyburd/redigo/redis"
|
||||
)
|
||||
|
||||
// NewRepositoryClient creates a repository client with standard token authorizer
|
||||
func NewRepositoryClient(endpoint string, insecure bool, credential auth.Credential,
|
||||
tokenServiceEndpoint, repository string) (*registry.Repository, error) {
|
||||
|
||||
transport := registry.GetHTTPTransport(insecure)
|
||||
|
||||
authorizer := auth.NewStandardTokenAuthorizer(&http.Client{
|
||||
Transport: transport,
|
||||
}, credential, tokenServiceEndpoint)
|
||||
|
||||
uam := &userAgentModifier{
|
||||
userAgent: "harbor-registry-client",
|
||||
}
|
||||
|
||||
return registry.NewRepository(repository, endpoint, &http.Client{
|
||||
Transport: registry.NewTransport(transport, authorizer, uam),
|
||||
})
|
||||
//IsEmptyStr checks if the specified str is empty (len == 0) after trimming leading and trailing spaces.
|
||||
func IsEmptyStr(str string) bool {
|
||||
return len(strings.TrimSpace(str)) == 0
|
||||
}
|
||||
|
||||
// NewRepositoryClientForJobservice creates a repository client that can only be used to
|
||||
// access the internal registry
|
||||
func NewRepositoryClientForJobservice(repository string) (*registry.Repository, error) {
|
||||
endpoint, err := config.LocalRegURL()
|
||||
//ReadEnv returns the value of the env variable.
|
||||
func ReadEnv(key string) string {
|
||||
return os.Getenv(key)
|
||||
}
|
||||
|
||||
//FileExists checks if the specified file exists.
|
||||
func FileExists(file string) bool {
|
||||
if !IsEmptyStr(file) {
|
||||
_, err := os.Stat(file)
|
||||
if err == nil {
|
||||
return true
|
||||
}
|
||||
if os.IsNotExist(err) {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
//DirExists checks if the specified dir exists.
|
||||
func DirExists(path string) bool {
|
||||
if IsEmptyStr(path) {
|
||||
return false
|
||||
}
|
||||
|
||||
f, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return f.IsDir()
|
||||
}
|
||||
|
||||
//IsValidPort checks if the port is valid.
|
||||
func IsValidPort(port uint) bool {
|
||||
return port != 0 && port < 65536
|
||||
}
|
||||
|
||||
//JobScore represents the data item with score in the redis db.
|
||||
type JobScore struct {
|
||||
JobBytes []byte
|
||||
Score int64
|
||||
}
|
||||
|
||||
//GetZsetByScore gets the items from the zset filtered by the specified score range.
|
||||
func GetZsetByScore(pool *redis.Pool, key string, scores []int64) ([]JobScore, error) {
|
||||
if pool == nil || IsEmptyStr(key) || len(scores) < 2 {
|
||||
return nil, errors.New("bad arguments")
|
||||
}
|
||||
|
||||
conn := pool.Get()
|
||||
defer conn.Close()
|
||||
|
||||
values, err := redis.Values(conn.Do("ZRANGEBYSCORE", key, scores[0], scores[1], "WITHSCORES"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
transport := registry.GetHTTPTransport()
|
||||
var jobsWithScores []JobScore
|
||||
|
||||
credential := auth.NewCookieCredential(&http.Cookie{
|
||||
Name: models.UISecretCookie,
|
||||
Value: config.JobserviceSecret(),
|
||||
})
|
||||
|
||||
authorizer := auth.NewStandardTokenAuthorizer(&http.Client{
|
||||
Transport: transport,
|
||||
}, credential, config.InternalTokenServiceEndpoint())
|
||||
|
||||
uam := &userAgentModifier{
|
||||
userAgent: "harbor-registry-client",
|
||||
if err := redis.ScanSlice(values, &jobsWithScores); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return registry.NewRepository(repository, endpoint, &http.Client{
|
||||
Transport: registry.NewTransport(transport, authorizer, uam),
|
||||
})
|
||||
}
|
||||
|
||||
type userAgentModifier struct {
|
||||
userAgent string
|
||||
}
|
||||
|
||||
// Modify adds user-agent header to the request
|
||||
func (u *userAgentModifier) Modify(req *http.Request) error {
|
||||
req.Header.Set(http.CanonicalHeaderKey("User-Agent"), u.userAgent)
|
||||
return nil
|
||||
}
|
||||
|
||||
// BuildBlobURL ...
|
||||
func BuildBlobURL(endpoint, repository, digest string) string {
|
||||
return fmt.Sprintf("%s/v2/%s/blobs/%s", endpoint, repository, digest)
|
||||
}
|
||||
|
||||
//GetTokenForRepo is used for job handler to get a token for clair.
|
||||
func GetTokenForRepo(repository string) (string, error) {
|
||||
c := &http.Cookie{Name: models.UISecretCookie, Value: config.JobserviceSecret()}
|
||||
credentail := auth.NewCookieCredential(c)
|
||||
t, err := auth.GetToken(config.InternalTokenServiceEndpoint(), true, credentail,
|
||||
[]*token.ResourceActions{&token.ResourceActions{
|
||||
Type: "repository",
|
||||
Name: repository,
|
||||
Actions: []string{"pull"},
|
||||
}})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return t.Token, nil
|
||||
return jobsWithScores, nil
|
||||
}
|
||||
|
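// Hypothetical usage sketch for GetZsetByScore (the key name, score range and
// redis address below are illustrative assumptions, not taken from this change):
func exampleGetZsetByScore() ([]JobScore, error) {
	pool := &redis.Pool{
		// Dial is invoked by the pool whenever a new connection is needed.
		Dial: func() (redis.Conn, error) {
			return redis.Dial("tcp", "localhost:6379")
		},
	}
	defer pool.Close()

	// Return the members of the zset "some_zset_key" whose scores lie in [0, 100].
	return GetZsetByScore(pool, "some_zset_key", []int64{0, 100})
}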
@ -1,349 +0,0 @@
|
||||
// Copyright 2018 The Harbor Authors. All rights reserved.
|
||||
|
||||
//Package config provides functions to handle the configurations of job service.
|
||||
package config
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/vmware/harbor/src/jobservice_v2/utils"
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
const (
|
||||
jobServiceProtocol = "JOB_SERVICE_PROTOCOL"
|
||||
jobServicePort = "JOB_SERVICE_PORT"
|
||||
jobServiceHTTPCert = "JOB_SERVICE_HTTPS_CERT"
|
||||
jobServiceHTTPKey = "JOB_SERVICE_HTTPS_KEY"
|
||||
jobServiceWorkerPoolBackend = "JOB_SERVICE_POOL_BACKEND"
|
||||
jobServiceWorkers = "JOB_SERVICE_POOL_WORKERS"
|
||||
jobServiceRedisHost = "JOB_SERVICE_POOL_REDIS_HOST"
|
||||
jobServiceRedisPort = "JOB_SERVICE_POOL_REDIS_PORT"
|
||||
jobServiceRedisNamespace = "JOB_SERVICE_POOL_REDIS_NAMESPACE"
|
||||
jobServiceLoggerBasePath = "JOB_SERVICE_LOGGER_BASE_PATH"
|
||||
jobServiceLoggerLevel = "JOB_SERVICE_LOGGER_LEVEL"
|
||||
jobServiceLoggerArchivePeriod = "JOB_SERVICE_LOGGER_ARCHIVE_PERIOD"
|
||||
jobServiceAdminServerEndpoint = "ADMINSERVER_URL"
|
||||
jobServiceAuthSecret = "JOBSERVICE_SECRET"
|
||||
|
||||
//JobServiceProtocolHTTPS points to the 'https' protocol
|
||||
JobServiceProtocolHTTPS = "https"
|
||||
//JobServiceProtocolHTTP points to the 'http' protocol
|
||||
JobServiceProtocolHTTP = "http"
|
||||
|
||||
//JobServicePoolBackendRedis represents redis backend
|
||||
JobServicePoolBackendRedis = "redis"
|
||||
)
|
||||
|
||||
//DefaultConfig is the default configuration reference
|
||||
var DefaultConfig = &Configuration{}
|
||||
|
||||
//Configuration loads and keeps the related configuration items of job service.
|
||||
type Configuration struct {
|
||||
//Protocol server listening on: https/http
|
||||
Protocol string `yaml:"protocol"`
|
||||
|
||||
//Server listening port
|
||||
Port uint `yaml:"port"`
|
||||
|
||||
AdminServer string `yaml:"admin_server"`
|
||||
|
||||
//Additional config when using https
|
||||
HTTPSConfig *HTTPSConfig `yaml:"https_config,omitempty"`
|
||||
|
||||
//Configurations of worker pool
|
||||
PoolConfig *PoolConfig `yaml:"worker_pool,omitempty"`
|
||||
|
||||
//Logger configurations
|
||||
LoggerConfig *LoggerConfig `yaml:"logger,omitempty"`
|
||||
}
|
||||
|
||||
//HTTPSConfig keeps additional configurations when using https protocol
|
||||
type HTTPSConfig struct {
|
||||
Cert string `yaml:"cert"`
|
||||
Key string `yaml:"key"`
|
||||
}
|
||||
|
||||
//RedisPoolConfig keeps redis pool info.
|
||||
type RedisPoolConfig struct {
|
||||
Host string `yaml:"host"`
|
||||
Port uint `yaml:"port"`
|
||||
Namespace string `yaml:"namespace"`
|
||||
}
|
||||
|
||||
//PoolConfig keeps worker pool configurations.
|
||||
type PoolConfig struct {
|
||||
//0 means unlimited
|
||||
WorkerCount uint `yaml:"workers"`
|
||||
Backend string `yaml:"backend"`
|
||||
RedisPoolCfg *RedisPoolConfig `yaml:"redis_pool,omitempty"`
|
||||
}
|
||||
|
||||
//LoggerConfig keeps logger configurations.
|
||||
type LoggerConfig struct {
|
||||
BasePath string `yaml:"path"`
|
||||
LogLevel string `yaml:"level"`
|
||||
ArchivePeriod uint `yaml:"archive_period"`
|
||||
}
|
||||
|
||||
//Load the configuration options from the specified yaml file.
//If the yaml file is specified and exists, load configurations from the yaml file first;
//if detecting env variables is enabled, load configurations from env variables as well.
//Please note, a detected env variable will override the same configuration item loaded from the file.
//
//yamlFilePath string: The path of the config yaml file
//detectEnv bool     : Whether to detect the environment variables or not
|
||||
func (c *Configuration) Load(yamlFilePath string, detectEnv bool) error {
|
||||
if !utils.IsEmptyStr(yamlFilePath) {
|
||||
//Try to load from file first
|
||||
data, err := ioutil.ReadFile(yamlFilePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = yaml.Unmarshal(data, c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if detectEnv {
|
||||
//Load from env variables
|
||||
c.loadEnvs()
|
||||
}
|
||||
|
||||
//Validate settings
|
||||
return c.validate()
|
||||
}
|
||||
|
||||
//GetLogBasePath returns the log base path config
|
||||
func GetLogBasePath() string {
|
||||
if DefaultConfig.LoggerConfig != nil {
|
||||
return DefaultConfig.LoggerConfig.BasePath
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
//GetLogLevel returns the log level
|
||||
func GetLogLevel() string {
|
||||
if DefaultConfig.LoggerConfig != nil {
|
||||
return DefaultConfig.LoggerConfig.LogLevel
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
//GetLogArchivePeriod returns the archive period
|
||||
func GetLogArchivePeriod() uint {
|
||||
if DefaultConfig.LoggerConfig != nil {
|
||||
return DefaultConfig.LoggerConfig.ArchivePeriod
|
||||
}
|
||||
|
||||
return 1 //return default
|
||||
}
|
||||
|
||||
//GetAuthSecret gets the auth secret from the env
|
||||
func GetAuthSecret() string {
|
||||
return utils.ReadEnv(jobServiceAuthSecret)
|
||||
}
|
||||
|
||||
//GetAdminServerEndpoint returns the admin server endpoint
|
||||
func GetAdminServerEndpoint() string {
|
||||
return DefaultConfig.AdminServer
|
||||
}
|
||||
|
||||
//Load env variables
|
||||
func (c *Configuration) loadEnvs() {
|
||||
prot := utils.ReadEnv(jobServiceProtocol)
|
||||
if !utils.IsEmptyStr(prot) {
|
||||
c.Protocol = prot
|
||||
}
|
||||
|
||||
p := utils.ReadEnv(jobServicePort)
|
||||
if !utils.IsEmptyStr(p) {
|
||||
if po, err := strconv.Atoi(p); err == nil {
|
||||
c.Port = uint(po)
|
||||
}
|
||||
}
|
||||
|
||||
//Only when protocol is https
|
||||
if c.Protocol == JobServiceProtocolHTTPS {
|
||||
cert := utils.ReadEnv(jobServiceHTTPCert)
|
||||
if !utils.IsEmptyStr(cert) {
|
||||
if c.HTTPSConfig != nil {
|
||||
c.HTTPSConfig.Cert = cert
|
||||
} else {
|
||||
c.HTTPSConfig = &HTTPSConfig{
|
||||
Cert: cert,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
certKey := utils.ReadEnv(jobServiceHTTPKey)
|
||||
if !utils.IsEmptyStr(certKey) {
|
||||
if c.HTTPSConfig != nil {
|
||||
c.HTTPSConfig.Key = certKey
|
||||
} else {
|
||||
c.HTTPSConfig = &HTTPSConfig{
|
||||
Key: certKey,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
backend := utils.ReadEnv(jobServiceWorkerPoolBackend)
|
||||
if !utils.IsEmptyStr(backend) {
|
||||
if c.PoolConfig == nil {
|
||||
c.PoolConfig = &PoolConfig{}
|
||||
}
|
||||
c.PoolConfig.Backend = backend
|
||||
}
|
||||
|
||||
workers := utils.ReadEnv(jobServiceWorkers)
|
||||
if !utils.IsEmptyStr(workers) {
|
||||
if count, err := strconv.Atoi(workers); err == nil {
|
||||
if c.PoolConfig == nil {
|
||||
c.PoolConfig = &PoolConfig{}
|
||||
}
|
||||
c.PoolConfig.WorkerCount = uint(count)
|
||||
}
|
||||
}
|
||||
|
||||
if c.PoolConfig != nil && c.PoolConfig.Backend == JobServicePoolBackendRedis {
|
||||
rh := utils.ReadEnv(jobServiceRedisHost)
|
||||
if !utils.IsEmptyStr(rh) {
|
||||
if c.PoolConfig.RedisPoolCfg == nil {
|
||||
c.PoolConfig.RedisPoolCfg = &RedisPoolConfig{}
|
||||
}
|
||||
c.PoolConfig.RedisPoolCfg.Host = rh
|
||||
}
|
||||
|
||||
rp := utils.ReadEnv(jobServiceRedisPort)
|
||||
if !utils.IsEmptyStr(rp) {
|
||||
if rport, err := strconv.Atoi(rp); err == nil {
|
||||
if c.PoolConfig.RedisPoolCfg == nil {
|
||||
c.PoolConfig.RedisPoolCfg = &RedisPoolConfig{}
|
||||
}
|
||||
c.PoolConfig.RedisPoolCfg.Port = uint(rport)
|
||||
}
|
||||
}
|
||||
|
||||
rn := utils.ReadEnv(jobServiceRedisNamespace)
|
||||
if !utils.IsEmptyStr(rn) {
|
||||
if c.PoolConfig.RedisPoolCfg == nil {
|
||||
c.PoolConfig.RedisPoolCfg = &RedisPoolConfig{}
|
||||
}
|
||||
c.PoolConfig.RedisPoolCfg.Namespace = rn
|
||||
}
|
||||
}
|
||||
|
||||
//logger
|
||||
loggerPath := utils.ReadEnv(jobServiceLoggerBasePath)
|
||||
if !utils.IsEmptyStr(loggerPath) {
|
||||
if c.LoggerConfig == nil {
|
||||
c.LoggerConfig = &LoggerConfig{}
|
||||
}
|
||||
c.LoggerConfig.BasePath = loggerPath
|
||||
}
|
||||
loggerLevel := utils.ReadEnv(jobServiceLoggerLevel)
|
||||
if !utils.IsEmptyStr(loggerLevel) {
|
||||
if c.LoggerConfig == nil {
|
||||
c.LoggerConfig = &LoggerConfig{}
|
||||
}
|
||||
c.LoggerConfig.LogLevel = loggerLevel
|
||||
}
|
||||
archivePeriod := utils.ReadEnv(jobServiceLoggerArchivePeriod)
|
||||
if !utils.IsEmptyStr(archivePeriod) {
|
||||
if period, err := strconv.Atoi(archivePeriod); err == nil {
|
||||
if c.LoggerConfig == nil {
|
||||
c.LoggerConfig = &LoggerConfig{}
|
||||
}
|
||||
c.LoggerConfig.ArchivePeriod = uint(period)
|
||||
}
|
||||
}
|
||||
|
||||
//admin server
|
||||
if adminServer := utils.ReadEnv(jobServiceAdminServerEndpoint); !utils.IsEmptyStr(adminServer) {
|
||||
c.AdminServer = adminServer
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
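// Example of the precedence implemented above (values are illustrative): if the
// yaml file sets "port: 9443" and JOB_SERVICE_PORT=8989 is exported, then
// Load(path, true) ends up with c.Port == 8989, because loadEnvs() runs after
// the yaml file has been unmarshalled.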
//Check if the configurations are valid settings.
|
||||
func (c *Configuration) validate() error {
|
||||
if c.Protocol != JobServiceProtocolHTTPS &&
|
||||
c.Protocol != JobServiceProtocolHTTP {
|
||||
return fmt.Errorf("protocol should be %s or %s, but current setting is %s",
|
||||
JobServiceProtocolHTTP,
|
||||
JobServiceProtocolHTTPS,
|
||||
c.Protocol)
|
||||
}
|
||||
|
||||
if !utils.IsValidPort(c.Port) {
|
||||
return fmt.Errorf("port number should be a none zero integer and less or equal 65535, but current is %d", c.Port)
|
||||
}
|
||||
|
||||
if c.Protocol == JobServiceProtocolHTTPS {
|
||||
if c.HTTPSConfig == nil {
|
||||
return fmt.Errorf("certificate must be configured if serve with protocol %s", c.Protocol)
|
||||
}
|
||||
|
||||
if utils.IsEmptyStr(c.HTTPSConfig.Cert) ||
|
||||
!utils.FileExists(c.HTTPSConfig.Cert) ||
|
||||
utils.IsEmptyStr(c.HTTPSConfig.Key) ||
|
||||
!utils.FileExists(c.HTTPSConfig.Key) {
|
||||
return fmt.Errorf("certificate for protocol %s is not correctly configured", c.Protocol)
|
||||
}
|
||||
}
|
||||
|
||||
if c.PoolConfig == nil {
|
||||
return errors.New("no worker pool is configured")
|
||||
}
|
||||
|
||||
if c.PoolConfig.Backend != JobServicePoolBackendRedis {
|
||||
return fmt.Errorf("worker pool backend %s does not support", c.PoolConfig.Backend)
|
||||
}
|
||||
|
||||
//When backend is redis
|
||||
if c.PoolConfig.Backend == JobServicePoolBackendRedis {
|
||||
if c.PoolConfig.RedisPoolCfg == nil {
|
||||
return fmt.Errorf("redis pool must be configured when backend is set to '%s'", c.PoolConfig.Backend)
|
||||
}
|
||||
if utils.IsEmptyStr(c.PoolConfig.RedisPoolCfg.Host) {
|
||||
return errors.New("host of redis pool is empty")
|
||||
}
|
||||
if !utils.IsValidPort(c.PoolConfig.RedisPoolCfg.Port) {
|
||||
return fmt.Errorf("redis port number should be a none zero integer and less or equal 65535, but current is %d", c.PoolConfig.RedisPoolCfg.Port)
|
||||
}
|
||||
if utils.IsEmptyStr(c.PoolConfig.RedisPoolCfg.Namespace) {
|
||||
return errors.New("namespace of redis pool is required")
|
||||
}
|
||||
}
|
||||
|
||||
if c.LoggerConfig == nil {
|
||||
return errors.New("missing logger config")
|
||||
}
|
||||
|
||||
if !utils.DirExists(c.LoggerConfig.BasePath) {
|
||||
return errors.New("logger path should be an existing dir")
|
||||
}
|
||||
|
||||
validLevels := "DEBUG,INFO,WARNING,ERROR,FATAL"
|
||||
if !strings.Contains(validLevels, c.LoggerConfig.LogLevel) {
|
||||
return fmt.Errorf("logger level can only be one of: %s", validLevels)
|
||||
}
|
||||
|
||||
if c.LoggerConfig.ArchivePeriod == 0 {
|
||||
return fmt.Errorf("logger archive period should be greater than 0")
|
||||
}
|
||||
|
||||
if _, err := url.Parse(c.AdminServer); err != nil {
|
||||
return fmt.Errorf("invalid admin server endpoint: %s", err)
|
||||
}
|
||||
|
||||
return nil //valid
|
||||
}
|
@ -1,142 +0,0 @@
|
||||
// Copyright 2018 The Harbor Authors. All rights reserved.
|
||||
package config
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestConfigLoadingFailed(t *testing.T) {
|
||||
cfg := &Configuration{}
|
||||
if err := cfg.Load("./config.not-existing.yaml", false); err == nil {
|
||||
t.Fatalf("Load config from none-existing document, expect none nil error but got '%s'\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigLoadingSucceed(t *testing.T) {
|
||||
if err := CreateLogDir(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
cfg := &Configuration{}
|
||||
if err := cfg.Load("../config_test.yml", false); err != nil {
|
||||
t.Fatalf("Load config from yaml file, expect nil error but got error '%s'\n", err)
|
||||
}
|
||||
|
||||
if err := RemoveLogDir(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigLoadingWithEnv(t *testing.T) {
|
||||
if err := CreateLogDir(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
setENV()
|
||||
|
||||
cfg := &Configuration{}
|
||||
if err := cfg.Load("../config_test.yml", true); err != nil {
|
||||
t.Fatalf("Load config from yaml file, expect nil error but got error '%s'\n", err)
|
||||
}
|
||||
|
||||
if cfg.Protocol != "https" {
|
||||
t.Fatalf("expect protocol 'https', but got '%s'\n", cfg.Protocol)
|
||||
}
|
||||
if cfg.Port != 8989 {
|
||||
t.Fatalf("expect port 8989 but got '%d'\n", cfg.Port)
|
||||
}
|
||||
if cfg.PoolConfig.WorkerCount != 8 {
|
||||
t.Fatalf("expect workcount 8 but go '%d'\n", cfg.PoolConfig.WorkerCount)
|
||||
}
|
||||
if cfg.PoolConfig.RedisPoolCfg.Host != "localhost" {
|
||||
t.Fatalf("expect redis host 'localhost' but got '%s'\n", cfg.PoolConfig.RedisPoolCfg.Host)
|
||||
}
|
||||
if cfg.PoolConfig.RedisPoolCfg.Port != 7379 {
|
||||
t.Fatalf("expect redis port '7379' but got '%d'\n", cfg.PoolConfig.RedisPoolCfg.Port)
|
||||
}
|
||||
if cfg.PoolConfig.RedisPoolCfg.Namespace != "ut_namespace" {
|
||||
t.Fatalf("expect redis namespace 'ut_namespace' but got '%s'\n", cfg.PoolConfig.RedisPoolCfg.Namespace)
|
||||
}
|
||||
if cfg.LoggerConfig.BasePath != "/tmp" {
|
||||
t.Fatalf("expect log base path '/tmp' but got '%s'\n", cfg.LoggerConfig.BasePath)
|
||||
}
|
||||
if cfg.LoggerConfig.LogLevel != "DEBUG" {
|
||||
t.Fatalf("expect log level 'DEBUG' but got '%s'\n", cfg.LoggerConfig.LogLevel)
|
||||
}
|
||||
if cfg.LoggerConfig.ArchivePeriod != 5 {
|
||||
t.Fatalf("expect log archive period 5 but got '%d'\n", cfg.LoggerConfig.ArchivePeriod)
|
||||
}
|
||||
|
||||
unsetENV()
|
||||
if err := RemoveLogDir(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDefaultConfig(t *testing.T) {
|
||||
if err := CreateLogDir(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := DefaultConfig.Load("../config_test.yml", true); err != nil {
|
||||
t.Fatalf("Load config from yaml file, expect nil error but got error '%s'\n", err)
|
||||
}
|
||||
|
||||
if endpoint := GetAdminServerEndpoint(); endpoint != "http://127.0.0.1:8888" {
|
||||
t.Fatalf("expect default admin server endpoint 'http://127.0.0.1:8888' but got '%s'\n", endpoint)
|
||||
}
|
||||
|
||||
if basePath := GetLogBasePath(); basePath != "/tmp/job_logs" {
|
||||
t.Fatalf("expect default logger base path '/tmp/job_logs' but got '%s'\n", basePath)
|
||||
}
|
||||
|
||||
if lvl := GetLogLevel(); lvl != "INFO" {
|
||||
t.Fatalf("expect default logger level 'INFO' but got '%s'\n", lvl)
|
||||
}
|
||||
|
||||
if period := GetLogArchivePeriod(); period != 1 {
|
||||
t.Fatalf("expect default log archive period 1 but got '%d'\n", period)
|
||||
}
|
||||
|
||||
if err := RemoveLogDir(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func setENV() {
|
||||
os.Setenv("JOB_SERVICE_PROTOCOL", "https")
|
||||
os.Setenv("JOB_SERVICE_PORT", "8989")
|
||||
os.Setenv("JOB_SERVICE_HTTPS_CERT", "../server.crt")
|
||||
os.Setenv("JOB_SERVICE_HTTPS_KEY", "../server.key")
|
||||
os.Setenv("JOB_SERVICE_POOL_BACKEND", "redis")
|
||||
os.Setenv("JOB_SERVICE_POOL_WORKERS", "8")
|
||||
os.Setenv("JOB_SERVICE_POOL_REDIS_HOST", "localhost")
|
||||
os.Setenv("JOB_SERVICE_POOL_REDIS_PORT", "7379")
|
||||
os.Setenv("JOB_SERVICE_POOL_REDIS_NAMESPACE", "ut_namespace")
|
||||
os.Setenv("JOB_SERVICE_LOGGER_BASE_PATH", "/tmp")
|
||||
os.Setenv("JOB_SERVICE_LOGGER_LEVEL", "DEBUG")
|
||||
os.Setenv("JOB_SERVICE_LOGGER_ARCHIVE_PERIOD", "5")
|
||||
}
|
||||
|
||||
func unsetENV() {
|
||||
os.Unsetenv("JOB_SERVICE_PROTOCOL")
|
||||
os.Unsetenv("JOB_SERVICE_PORT")
|
||||
os.Unsetenv("JOB_SERVICE_HTTPS_CERT")
|
||||
os.Unsetenv("JOB_SERVICE_HTTPS_KEY")
|
||||
os.Unsetenv("JOB_SERVICE_POOL_BACKEND")
|
||||
os.Unsetenv("JOB_SERVICE_POOL_WORKERS")
|
||||
os.Unsetenv("JOB_SERVICE_POOL_REDIS_HOST")
|
||||
os.Unsetenv("JOB_SERVICE_POOL_REDIS_PORT")
|
||||
os.Unsetenv("JOB_SERVICE_POOL_REDIS_NAMESPACE")
|
||||
os.Unsetenv("JOB_SERVICE_LOGGER_BASE_PATH")
|
||||
os.Unsetenv("JOB_SERVICE_LOGGER_LEVEL")
|
||||
os.Unsetenv("JOB_SERVICE_LOGGER_ARCHIVE_PERIOD")
|
||||
}
|
||||
|
||||
func CreateLogDir() error {
|
||||
return os.MkdirAll("/tmp/job_logs", 0755)
|
||||
}
|
||||
|
||||
func RemoveLogDir() error {
|
||||
return os.Remove("/tmp/job_logs")
|
||||
}
|
@ -1,59 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
|
||||
"github.com/vmware/harbor/src/adminserver/client"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/config"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/env"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/job/impl"
|
||||
ilogger "github.com/vmware/harbor/src/jobservice_v2/job/impl/logger"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/logger"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/runtime"
|
||||
"github.com/vmware/harbor/src/jobservice_v2/utils"
|
||||
)
|
||||
|
||||
func main() {
|
||||
//Get parameters
|
||||
configPath := flag.String("c", "", "Specify the yaml config file path")
|
||||
flag.Parse()
|
||||
|
||||
//Missing config file
|
||||
if configPath == nil || utils.IsEmptyStr(*configPath) {
|
||||
fmt.Println("Config file should be specified")
|
||||
flag.Usage()
|
||||
return
|
||||
}
|
||||
|
||||
//Load configurations
|
||||
if err := config.DefaultConfig.Load(*configPath, true); err != nil {
|
||||
fmt.Printf("Failed to load configurations with error: %s\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
//Set job context initializer
|
||||
runtime.JobService.SetJobContextInitializer(func(ctx *env.Context) (env.JobContext, error) {
|
||||
secret := config.GetAuthSecret()
|
||||
if utils.IsEmptyStr(secret) {
|
||||
return nil, errors.New("empty auth secret")
|
||||
}
|
||||
|
||||
adminClient := client.NewClient(config.GetAdminServerEndpoint(), &client.Config{Secret: secret})
|
||||
jobCtx := impl.NewContext(ctx.SystemContext, adminClient)
|
||||
|
||||
if err := jobCtx.Init(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return jobCtx, nil
|
||||
})
|
||||
|
||||
//New logger for job service
|
||||
sLogger := ilogger.NewServiceLogger(config.GetLogLevel())
|
||||
logger.SetLogger(sLogger)
|
||||
|
||||
//Start
|
||||
runtime.JobService.LoadAndRun()
|
||||
}
|