fix: golangci-lint errcheck (#16920)

Signed-off-by: Shengwen Yu <yshengwen@vmware.com>

Parent: 9b22e2943f
Commit: b43ba15f40
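The commit silences golangci-lint's errcheck findings by handling every previously ignored error in one of three ways: propagate it to the caller, log it where the failure is non-fatal, or discard it explicitly with `_ =`. The sketch below illustrates the three shapes with a hypothetical loadConfig helper; it is not Harbor code, only a minimal example of the pattern applied throughout the diff.

package main

import (
	"encoding/json"
	"log"
	"os"
)

// loadConfig propagates the error: the preferred fix when the caller can act on it.
func loadConfig(raw []byte) (map[string]string, error) {
	var cfg map[string]string
	if err := json.Unmarshal(raw, &cfg); err != nil {
		return nil, err
	}
	return cfg, nil
}

func main() {
	cfg, err := loadConfig([]byte(`{"key":"value"}`))
	if err != nil {
		// Log-and-continue: used in the commit where the failure is non-fatal.
		log.Printf("failed to load config: %v", err)
	}

	// Explicit discard: the error is deliberately ignored, which satisfies
	// errcheck while documenting the decision.
	_ = json.NewEncoder(os.Stdout).Encode(cfg)
}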
@@ -29,7 +29,7 @@ linters:
  # - whitespace
  - bodyclose
  - deadcode
- # - errcheck
+ - errcheck
  # - gosec
  # - gosimple
  - govet
@@ -48,7 +48,7 @@ run:
  - ".*_test.go"
  - ".*test.go"
  skip-dirs:
- - "testing/"
+ - "testing"
  timeout: 5m

  issue:
@@ -87,11 +87,13 @@ func (cho *ChartOperator) GetChartDetails(content []byte) (*ChartVersionDetails,
  if len(chartData.Values) > 0 {
  c := chartutil.Values(chartData.Values)
  ValYaml, err := c.YAML()

  if err != nil {
  return nil, err
  }
- c.Encode(&buf)
+ err = c.Encode(&buf)
+ if err != nil {
+ return nil, err
+ }
  values = parseRawValues(buf.Bytes())
  // Append values.yaml file
  files[valuesFileName] = ValYaml
@@ -132,7 +132,10 @@ func (rc *Cache) ClearAll() error {
  // so no gc operation.
  func (rc *Cache) StartAndGC(config string) error {
  var cf map[string]string
- json.Unmarshal([]byte(config), &cf)
+ err := json.Unmarshal([]byte(config), &cf)
+ if err != nil {
+ return err
+ }

  if _, ok := cf["key"]; !ok {
  cf["key"] = DefaultKey
@@ -14,6 +14,7 @@ import (
  "github.com/goharbor/harbor/src/common/job/models"
  "github.com/goharbor/harbor/src/jobservice/job"
  job_models "github.com/goharbor/harbor/src/jobservice/job"
+ "github.com/goharbor/harbor/src/lib/log"
  )

  const (
@@ -99,7 +100,10 @@ func NewJobServiceServer() *httptest.Server {
  panic(err)
  }
  jobReq := models.JobRequest{}
- json.Unmarshal(data, &jobReq)
+ err = json.Unmarshal(data, &jobReq)
+ if err != nil {
+ log.Warningf("failed to unmarshal json to models.JobRequest, error: %v", err)
+ }
  if jobReq.Job.Name == "replication" {
  respData := models.JobStats{
  Stats: &models.StatsInfo{
@@ -295,7 +295,7 @@ func (c *controller) Sync(ctx context.Context, references []distribution.Descrip
  }

  if len(updating) > 0 {
- orm.WithTransaction(func(ctx context.Context) error {
+ err := orm.WithTransaction(func(ctx context.Context) error {
  for _, blob := range updating {
  if err := c.Update(ctx, blob); err != nil {
  log.G(ctx).Warningf("Failed to update blob %s, error: %v", blob.Digest, err)
@@ -305,6 +305,9 @@ func (c *controller) Sync(ctx context.Context, references []distribution.Descrip

  return nil
  })(orm.SetTransactionOpNameToContext(ctx, "tx-sync-blob"))
+ if err != nil {
+ return err
+ }
  }

  if len(missing) > 0 {
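The hunk above captures the error of the closure returned by orm.WithTransaction, which was previously invoked and dropped. A minimal sketch of why that assignment matters follows; withTransaction here is a hypothetical stand-in with the same function-returning-function shape, not Harbor's orm package.

package main

import (
	"context"
	"errors"
	"fmt"
)

// withTransaction mirrors the shape of a helper that wraps a function in a
// transaction and returns another function to be invoked with a context.
// (Hypothetical stand-in for the orm helper in the diff.)
func withTransaction(f func(ctx context.Context) error) func(ctx context.Context) error {
	return func(ctx context.Context) error {
		// begin/commit/rollback would live here in a real implementation
		return f(ctx)
	}
}

func main() {
	err := withTransaction(func(ctx context.Context) error {
		return errors.New("update failed")
	})(context.Background())
	if err != nil {
		// Without assigning the outer call's result, this error is silently lost.
		fmt.Println("transaction error:", err)
	}
}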
@@ -21,47 +21,47 @@ import (

  func init() {
  // notification
- notifier.Subscribe(event.TopicPushArtifact, &artifact.Handler{})
- notifier.Subscribe(event.TopicPullArtifact, &artifact.Handler{})
- notifier.Subscribe(event.TopicDeleteArtifact, &artifact.Handler{})
- notifier.Subscribe(event.TopicUploadChart, &chart.Handler{})
- notifier.Subscribe(event.TopicDeleteChart, &chart.Handler{})
- notifier.Subscribe(event.TopicDownloadChart, &chart.Handler{})
- notifier.Subscribe(event.TopicQuotaExceed, &quota.Handler{})
- notifier.Subscribe(event.TopicQuotaWarning, &quota.Handler{})
- notifier.Subscribe(event.TopicScanningFailed, &scan.Handler{})
- notifier.Subscribe(event.TopicScanningStopped, &scan.Handler{})
- notifier.Subscribe(event.TopicScanningCompleted, &scan.Handler{})
- notifier.Subscribe(event.TopicDeleteArtifact, &scan.DelArtHandler{})
- notifier.Subscribe(event.TopicReplication, &artifact.ReplicationHandler{})
- notifier.Subscribe(event.TopicTagRetention, &artifact.RetentionHandler{})
+ _ = notifier.Subscribe(event.TopicPushArtifact, &artifact.Handler{})
+ _ = notifier.Subscribe(event.TopicPullArtifact, &artifact.Handler{})
+ _ = notifier.Subscribe(event.TopicDeleteArtifact, &artifact.Handler{})
+ _ = notifier.Subscribe(event.TopicUploadChart, &chart.Handler{})
+ _ = notifier.Subscribe(event.TopicDeleteChart, &chart.Handler{})
+ _ = notifier.Subscribe(event.TopicDownloadChart, &chart.Handler{})
+ _ = notifier.Subscribe(event.TopicQuotaExceed, &quota.Handler{})
+ _ = notifier.Subscribe(event.TopicQuotaWarning, &quota.Handler{})
+ _ = notifier.Subscribe(event.TopicScanningFailed, &scan.Handler{})
+ _ = notifier.Subscribe(event.TopicScanningStopped, &scan.Handler{})
+ _ = notifier.Subscribe(event.TopicScanningCompleted, &scan.Handler{})
+ _ = notifier.Subscribe(event.TopicDeleteArtifact, &scan.DelArtHandler{})
+ _ = notifier.Subscribe(event.TopicReplication, &artifact.ReplicationHandler{})
+ _ = notifier.Subscribe(event.TopicTagRetention, &artifact.RetentionHandler{})

  // replication
- notifier.Subscribe(event.TopicPushArtifact, &replication.Handler{})
- notifier.Subscribe(event.TopicDeleteArtifact, &replication.Handler{})
- notifier.Subscribe(event.TopicCreateTag, &replication.Handler{})
- notifier.Subscribe(event.TopicDeleteTag, &replication.Handler{})
+ _ = notifier.Subscribe(event.TopicPushArtifact, &replication.Handler{})
+ _ = notifier.Subscribe(event.TopicDeleteArtifact, &replication.Handler{})
+ _ = notifier.Subscribe(event.TopicCreateTag, &replication.Handler{})
+ _ = notifier.Subscribe(event.TopicDeleteTag, &replication.Handler{})

  // p2p preheat
- notifier.Subscribe(event.TopicPushArtifact, &p2p.Handler{})
- notifier.Subscribe(event.TopicScanningCompleted, &p2p.Handler{})
- notifier.Subscribe(event.TopicArtifactLabeled, &p2p.Handler{})
+ _ = notifier.Subscribe(event.TopicPushArtifact, &p2p.Handler{})
+ _ = notifier.Subscribe(event.TopicScanningCompleted, &p2p.Handler{})
+ _ = notifier.Subscribe(event.TopicArtifactLabeled, &p2p.Handler{})

  // audit logs
- notifier.Subscribe(event.TopicPushArtifact, &auditlog.Handler{})
- notifier.Subscribe(event.TopicPullArtifact, &auditlog.Handler{})
- notifier.Subscribe(event.TopicDeleteArtifact, &auditlog.Handler{})
- notifier.Subscribe(event.TopicCreateProject, &auditlog.Handler{})
- notifier.Subscribe(event.TopicDeleteProject, &auditlog.Handler{})
- notifier.Subscribe(event.TopicDeleteRepository, &auditlog.Handler{})
- notifier.Subscribe(event.TopicCreateTag, &auditlog.Handler{})
- notifier.Subscribe(event.TopicDeleteTag, &auditlog.Handler{})
+ _ = notifier.Subscribe(event.TopicPushArtifact, &auditlog.Handler{})
+ _ = notifier.Subscribe(event.TopicPullArtifact, &auditlog.Handler{})
+ _ = notifier.Subscribe(event.TopicDeleteArtifact, &auditlog.Handler{})
+ _ = notifier.Subscribe(event.TopicCreateProject, &auditlog.Handler{})
+ _ = notifier.Subscribe(event.TopicDeleteProject, &auditlog.Handler{})
+ _ = notifier.Subscribe(event.TopicDeleteRepository, &auditlog.Handler{})
+ _ = notifier.Subscribe(event.TopicCreateTag, &auditlog.Handler{})
+ _ = notifier.Subscribe(event.TopicDeleteTag, &auditlog.Handler{})

  // internal
- notifier.Subscribe(event.TopicPullArtifact, &internal.Handler{})
- notifier.Subscribe(event.TopicPushArtifact, &internal.Handler{})
+ _ = notifier.Subscribe(event.TopicPullArtifact, &internal.Handler{})
+ _ = notifier.Subscribe(event.TopicPushArtifact, &internal.Handler{})

- task.RegisterTaskStatusChangePostFunc(job.Replication, func(ctx context.Context, taskID int64, status string) error {
+ _ = task.RegisterTaskStatusChangePostFunc(job.Replication, func(ctx context.Context, taskID int64, status string) error {
  notification.AddEvent(ctx, &metadata.ReplicationMetaData{
  ReplicationTaskID: taskID,
  Status: status,
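Because init() cannot return an error and a failed subscription at startup is not actionable here, the commit discards notifier.Subscribe's return value with an explicit `_ =` rather than leaving it implicitly ignored. A rough sketch of the idea, using a hypothetical event bus rather than Harbor's notifier:

package main

import (
	"errors"
	"fmt"
)

// bus is a hypothetical stand-in for the notifier in the diff: Subscribe
// returns an error that init-time callers often cannot meaningfully handle.
type bus struct{ handlers map[string][]func() }

func (b *bus) Subscribe(topic string, h func()) error {
	if h == nil {
		return errors.New("nil handler")
	}
	b.handlers[topic] = append(b.handlers[topic], h)
	return nil
}

func main() {
	b := &bus{handlers: map[string][]func(){}}

	// Explicitly discarding the error documents that the failure mode is
	// accepted, and keeps errcheck quiet without disabling the linter.
	_ = b.Subscribe("push_artifact", func() { fmt.Println("handled") })

	for _, h := range b.handlers["push_artifact"] {
		h()
	}
}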
@@ -51,7 +51,10 @@ func gcCallback(ctx context.Context, p string) error {
  func gcTaskStatusChange(ctx context.Context, taskID int64, status string) error {
  if status == job.SuccessStatus.String() && config.QuotaPerProjectEnable(ctx) {
  go func() {
- quota.RefreshForProjects(orm.Context())
+ err := quota.RefreshForProjects(orm.Context())
+ if err != nil {
+ log.Warningf("failed to refresh project quota, error: %v", err)
+ }
  }()
  }

@@ -70,7 +70,10 @@ func (c *controller) ListPolicies(ctx context.Context, query *q.Query) ([]*model

  func (c *controller) populateRegistry(ctx context.Context, p *pkgmodel.Policy) (*model.Policy, error) {
  policy := &model.Policy{}
- policy.From(p)
+ err := policy.From(p)
+ if err != nil {
+ return nil, err
+ }
  var srcRegistryID, destRegistryID int64 = 0, 0
  if policy.SrcRegistry != nil && policy.SrcRegistry.ID != 0 {
  srcRegistryID = policy.SrcRegistry.ID
@@ -362,7 +362,8 @@ func (bc *basicController) ScanAll(ctx context.Context, trigger string, async bo
  return
  }

- bc.startScanAll(ctx, executionID)
+ err = bc.startScanAll(ctx, executionID)
+ log.Errorf("failed to start scan all, executionID=%d, error: %v", executionID, err)
  }(bc.makeCtx())
  } else {
  if err := bc.startScanAll(ctx, executionID); err != nil {
@@ -41,7 +41,7 @@ func newOptions(options ...Option) *Options {
  opts := &Options{Ping: true}

  for _, o := range options {
- o(opts)
+ _ = o(opts)
  }

  return opts
@@ -155,7 +155,12 @@ func (b *BaseController) WriteYamlData(object interface{}) {

  // PopulateUserSession generates a new session ID and fill the user model in parm to the session
  func (b *BaseController) PopulateUserSession(u models.User) {
- b.SessionRegenerateID()
+ err := b.SessionRegenerateID()
+ if err != nil {
+ log.Errorf("failed to generate a new session ID and fill the user mode to this session, error: %v", err)
+ b.SendError(err)
+ return
+ }
  b.SetSession(userSessionKey, u)
  }

@@ -80,14 +80,20 @@ func (ia *InternalAPI) SyncQuota() {
  cfgMgr := config.GetCfgManager(ctx)
  if !cur {
  cfgMgr.Set(ctx, common.ReadOnly, true)
- cfgMgr.Save(ctx)
+ err := cfgMgr.Save(ctx)
+ if err != nil {
+ log.Warningf("failed to save context into config manager, error: %v", err)
+ }
  }
  // For api call, to avoid the timeout, it should be asynchronous
  go func() {
  defer func() {
  ctx := orm.Context()
  cfgMgr.Set(ctx, common.ReadOnly, cur)
- cfgMgr.Save(ctx)
+ err := cfgMgr.Save(ctx)
+ if err != nil {
+ log.Warningf("failed to save context into config manager asynchronously, error: %v", err)
+ }
  }()
  log.Info("start to sync quota(API), the system will be set to ReadOnly and back it normal once it done.")
  ctx := orm.NewContext(context.TODO(), o.NewOrm())
@@ -83,9 +83,12 @@ func (cc *CommonController) Login() {
  log.Debugf("Redirect user %s to login page of OIDC provider", principal)
  // Return a json to UI with status code 403, as it cannot handle status 302
  cc.Ctx.Output.Status = http.StatusForbidden
- cc.Ctx.Output.JSON(struct {
+ err = cc.Ctx.Output.JSON(struct {
  Location string `json:"redirect_location"`
  }{url}, false, false)
+ if err != nil {
+ log.Errorf("Failed to write json to response body, error: %v", err)
+ }
  return
  }

@@ -223,7 +223,10 @@ func main() {
  log.Infof("Version: %s, Git commit: %s", version.ReleaseVersion, version.GitCommit)

  log.Info("Fix empty subiss for meta info data.")
- oidc.FixEmptySubIss(orm.Context())
+ _, err = oidc.FixEmptySubIss(orm.Context())
+ if err != nil {
+ log.Warningf("oidc.FixEmptySubIss() errors out, error: %v", err)
+ }
  beego.RunWithMiddleWares("", middlewares.MiddleWares()...)
  }

@@ -136,7 +136,10 @@ func (rp *Provider) SessionExist(sid string) bool {
  func (rp *Provider) SessionRegenerate(oldsid, sid string) (session.Store, error) {
  ctx := context.TODO()
  if !rp.SessionExist(oldsid) {
- rp.c.Save(ctx, sid, "", time.Duration(rp.maxlifetime))
+ err := rp.c.Save(ctx, sid, "", time.Duration(rp.maxlifetime))
+ if err != nil {
+ log.Warningf("failed to save sid=%s, where oldsid=%s, error: %s", sid, oldsid, err)
+ }
  } else {
  if rdb, ok := rp.c.(*redis.Cache); ok {
  // redis has rename command
@@ -149,8 +152,14 @@ func (rp *Provider) SessionRegenerate(oldsid, sid string) (session.Store, error)
  return nil, err
  }

- rp.c.Delete(ctx, oldsid)
- rp.c.Save(ctx, sid, kv)
+ err = rp.c.Delete(ctx, oldsid)
+ if err != nil {
+ log.Warningf("failed to delete oldsid=%s, error: %s", oldsid, err)
+ }
+ err = rp.c.Save(ctx, sid, kv)
+ if err != nil {
+ log.Warningf("failed to save sid=%s, error: %s", sid, err)
+ }
  }
  }

@@ -91,7 +91,11 @@ func (c *Context) Init() error {
  }

  // Initialize DB finished
- initDBCompleted()
+ err = initDBCompleted()
+ if err != nil {
+ logger.Errorf("failed to call initDBCompleted(), error: %v", err)
+ return err
+ }
  return nil
  }

@@ -216,6 +220,5 @@ func createLoggers(jobID string) (logger.Interface, error) {
  }

  func initDBCompleted() error {
- sweeper.PrepareDBSweep()
- return nil
+ return sweeper.PrepareDBSweep()
  }
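initDBCompleted now returns the callee's error instead of swallowing it and reporting success. A tiny before/after sketch, with prepare standing in for a callee such as sweeper.PrepareDBSweep:

package main

import (
	"errors"
	"fmt"
)

// prepare stands in for a callee that reports its own error.
func prepare() error { return errors.New("sweep table missing") }

// initCompletedOld drops the callee's error and always reports success;
// errcheck flags the bare call.
func initCompletedOld() error {
	prepare() //nolint:errcheck // shown only to illustrate the "before" shape
	return nil
}

// initCompletedNew propagates the callee's error directly, as the diff does.
func initCompletedNew() error {
	return prepare()
}

func main() {
	fmt.Println(initCompletedOld(), initCompletedNew())
}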
@@ -80,9 +80,9 @@ func (j *Job) Run(ctx job.Context, params job.Parameters) error {
  return errors.New("`JOB_FAILED` env is set")
  }

- ctx.Checkin("progress data: %30")
+ _ = ctx.Checkin("progress data: %30")
  <-time.After(1 * time.Second)
- ctx.Checkin("progress data: %60")
+ _ = ctx.Checkin("progress data: %60")

  // HOLD ON FOR A WHILE
  logger.Warning("Holding for 30 seconds")
@@ -35,7 +35,9 @@ import (

  func main() {
  cfgLib.DefaultCfgManager = common.RestCfgManager
- cfgLib.DefaultMgr().Load(context.Background())
+ if err := cfgLib.DefaultMgr().Load(context.Background()); err != nil {
+ panic(fmt.Sprintf("failed to load configuration, error: %v", err))
+ }

  // Get parameters
  configPath := flag.String("c", "", "Specify the yaml config file path")
src/lib/cache/memory/memory.go (vendored, 7 changed lines)
@@ -23,6 +23,7 @@ import (
  "time"

  "github.com/goharbor/harbor/src/lib/cache"
+ "github.com/goharbor/harbor/src/lib/log"
  )

  type entry struct {
@@ -50,7 +51,8 @@ func (c *Cache) Contains(ctx context.Context, key string) bool {
  }

  if e.(*entry).isExpirated() {
- c.Delete(ctx, c.opts.Key(key))
+ err := c.Delete(ctx, c.opts.Key(key))
+ log.Errorf("failed to delete cache in Contains() method when it's expired, error: %v", err)
  return false
  }

@@ -72,7 +74,8 @@ func (c *Cache) Fetch(ctx context.Context, key string, value interface{}) error

  e := v.(*entry)
  if e.isExpirated() {
- c.Delete(ctx, c.opts.Key(key))
+ err := c.Delete(ctx, c.opts.Key(key))
+ log.Errorf("failed to delete cache in Fetch() method when it's expired, error: %v", err)
  return cache.ErrNotFound
  }

@@ -102,7 +102,10 @@ func InitWithSettings(cfgs map[string]interface{}, kp ...encrypt.KeyProvider) {
  Init()
  DefaultCfgManager = common.InMemoryCfgManager
  mgr := DefaultMgr()
- mgr.UpdateConfig(backgroundCtx, cfgs)
+ err := mgr.UpdateConfig(backgroundCtx, cfgs)
+ if err != nil {
+ log.Warningf("failed to update config, error: %v", err)
+ }
  if len(kp) > 0 {
  keyProvider = kp[0]
  }
@@ -188,7 +188,7 @@ func (l *Logger) output(record *Record) (err error) {
  func (l *Logger) Debug(v ...interface{}) {
  if l.lvl <= DebugLevel {
  record := NewRecord(time.Now(), fmt.Sprint(v...), l.getLine(), DebugLevel)
- l.output(record)
+ _ = l.output(record)
  }
  }

@@ -196,7 +196,7 @@ func (l *Logger) Debug(v ...interface{}) {
  func (l *Logger) Debugf(format string, v ...interface{}) {
  if l.lvl <= DebugLevel {
  record := NewRecord(time.Now(), fmt.Sprintf(format, v...), l.getLine(), DebugLevel)
- l.output(record)
+ _ = l.output(record)
  }
  }

@@ -204,7 +204,7 @@ func (l *Logger) Debugf(format string, v ...interface{}) {
  func (l *Logger) Info(v ...interface{}) {
  if l.lvl <= InfoLevel {
  record := NewRecord(time.Now(), fmt.Sprint(v...), l.getLine(), InfoLevel)
- l.output(record)
+ _ = l.output(record)
  }
  }

@@ -212,7 +212,7 @@ func (l *Logger) Info(v ...interface{}) {
  func (l *Logger) Infof(format string, v ...interface{}) {
  if l.lvl <= InfoLevel {
  record := NewRecord(time.Now(), fmt.Sprintf(format, v...), l.getLine(), InfoLevel)
- l.output(record)
+ _ = l.output(record)
  }
  }

@@ -220,7 +220,7 @@ func (l *Logger) Infof(format string, v ...interface{}) {
  func (l *Logger) Warning(v ...interface{}) {
  if l.lvl <= WarningLevel {
  record := NewRecord(time.Now(), fmt.Sprint(v...), l.getLine(), WarningLevel)
- l.output(record)
+ _ = l.output(record)
  }
  }

@@ -228,7 +228,7 @@ func (l *Logger) Warning(v ...interface{}) {
  func (l *Logger) Warningf(format string, v ...interface{}) {
  if l.lvl <= WarningLevel {
  record := NewRecord(time.Now(), fmt.Sprintf(format, v...), l.getLine(), WarningLevel)
- l.output(record)
+ _ = l.output(record)
  }
  }

@@ -236,7 +236,7 @@ func (l *Logger) Warningf(format string, v ...interface{}) {
  func (l *Logger) Error(v ...interface{}) {
  if l.lvl <= ErrorLevel {
  record := NewRecord(time.Now(), fmt.Sprint(v...), l.getLine(), ErrorLevel)
- l.output(record)
+ _ = l.output(record)
  }
  }

@@ -244,7 +244,7 @@ func (l *Logger) Error(v ...interface{}) {
  func (l *Logger) Errorf(format string, v ...interface{}) {
  if l.lvl <= ErrorLevel {
  record := NewRecord(time.Now(), fmt.Sprintf(format, v...), l.getLine(), ErrorLevel)
- l.output(record)
+ _ = l.output(record)
  }
  }

@@ -252,7 +252,7 @@ func (l *Logger) Errorf(format string, v ...interface{}) {
  func (l *Logger) Fatal(v ...interface{}) {
  if l.lvl <= FatalLevel {
  record := NewRecord(time.Now(), fmt.Sprint(v...), l.getLine(), FatalLevel)
- l.output(record)
+ _ = l.output(record)
  }
  os.Exit(1)
  }
@@ -261,7 +261,7 @@ func (l *Logger) Fatal(v ...interface{}) {
  func (l *Logger) Fatalf(format string, v ...interface{}) {
  if l.lvl <= FatalLevel {
  record := NewRecord(time.Now(), fmt.Sprintf(format, v...), l.getLine(), FatalLevel)
- l.output(record)
+ _ = l.output(record)
  }
  os.Exit(1)
  }
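Inside the logger itself there is no reasonable place to report a failed write, so each level method discards output's error explicitly. A minimal sketch of that choice with a hypothetical logger type (not Harbor's log package):

package main

import (
	"fmt"
	"io"
	"os"
	"time"
)

// logger is a hypothetical minimal logger; output returns the underlying
// write error, as in the diff.
type logger struct{ w io.Writer }

func (l *logger) output(msg string) error {
	_, err := fmt.Fprintf(l.w, "%s %s\n", time.Now().Format(time.RFC3339), msg)
	return err
}

func (l *logger) Info(v ...interface{}) {
	// Inside the logger there is nowhere better to report a failed write,
	// so the error is discarded explicitly rather than silently.
	_ = l.output(fmt.Sprint(v...))
}

func main() {
	(&logger{w: os.Stdout}).Info("hello")
}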
@@ -52,7 +52,7 @@ func AsNotFoundError(err error, messageFormat string, args ...interface{}) *erro
  if errors.Is(err, orm.ErrNoRows) {
  e := errors.NotFoundError(nil)
  if len(messageFormat) > 0 {
- e.WithMessage(messageFormat, args...)
+ _ = e.WithMessage(messageFormat, args...)
  }
  return e
  }
@@ -31,7 +31,7 @@ type nopCloser struct {
  func (n nopCloser) Read(p []byte) (int, error) {
  num, err := n.ReadSeeker.Read(p)
  if err == io.EOF { // move to start to have it ready for next read cycle
- n.Seek(0, io.SeekStart)
+ _, _ = n.Seek(0, io.SeekStart)
  }
  return num, err
  }
@@ -44,14 +44,14 @@ func (n nopCloser) Close() error {
  func copyBody(body io.ReadCloser) io.ReadCloser {
  // check if body was already read and converted into our nopCloser
  if nc, ok := body.(nopCloser); ok {
- nc.Seek(0, io.SeekStart)
+ _, _ = nc.Seek(0, io.SeekStart)
  return body
  }

  defer body.Close()

  var buf bytes.Buffer
- io.Copy(&buf, body)
+ _, _ = io.Copy(&buf, body)

  return nopCloser{bytes.NewReader(buf.Bytes())}
  }
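Here both return values of Seek and io.Copy are discarded deliberately: a short copy or failed rewind only yields a shorter replayable body, and the surrounding code cannot do better. A simplified, hypothetical sketch of the rewindable-body idea:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// rewind copies a body into memory so it can be read repeatedly; this mirrors
// the copyBody/nopCloser idea from the diff in simplified form.
func rewind(r io.Reader) *bytes.Reader {
	var buf bytes.Buffer
	// A short read only means a shorter replayable body; both return values
	// are discarded deliberately.
	_, _ = io.Copy(&buf, r)
	return bytes.NewReader(buf.Bytes())
}

func main() {
	body := rewind(strings.NewReader("payload"))

	first, _ := io.ReadAll(body)
	_, _ = body.Seek(0, io.SeekStart) // rewind for the next consumer
	second, _ := io.ReadAll(body)

	fmt.Println(string(first), string(second))
}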
@@ -48,7 +48,7 @@ func (c *CfgManager) LoadDefault() {
  log.Errorf("LoadDefault failed, config item, key: %v, err: %v", item.Name, err)
  continue
  }
- c.Store.Set(item.Name, *cfgValue)
+ _ = c.Store.Set(item.Name, *cfgValue)
  }
  }
  }
@@ -65,7 +65,7 @@ func (c *CfgManager) LoadSystemConfigFromEnv() {
  log.Errorf("LoadSystemConfigFromEnv failed, config item, key: %v, err: %v", item.Name, err)
  continue
  }
- c.Store.Set(item.Name, *configValue)
+ _ = c.Store.Set(item.Name, *configValue)
  }
  }
  }
@@ -149,7 +149,7 @@ func (c *CfgManager) Set(ctx context.Context, key string, value interface{}) {
  log.Errorf("error when setting key: %v, error %v", key, err)
  return
  }
- c.Store.Set(key, *configValue)
+ _ = c.Store.Set(key, *configValue)
  }

  // GetDatabaseCfg - Get database configurations
@@ -112,7 +112,10 @@ func (c *ConfigStore) Update(ctx context.Context, cfgMap map[string]interface{})
  delete(cfgMap, key)
  continue
  }
- c.Set(key, *configValue)
+ if err := c.Set(key, *configValue); err != nil {
+ log.Warningf("failed to update configure item, key=%s, error: %v", key, err)
+ continue
+ }
  }
  // Update to driver
  return c.cfgDriver.Save(ctx, cfgMap)
@@ -33,10 +33,13 @@ func NewExporter(opt *Opt) *Exporter {
  if opt.CacheDuration > 0 {
  CacheInit(opt)
  }
- exporter.RegisterCollector(NewHealthCollect(hbrCli),
+ err := exporter.RegisterCollector(NewHealthCollect(hbrCli),
  NewSystemInfoCollector(hbrCli),
  NewProjectCollector(),
  NewJobServiceCollector())
+ if err != nil {
+ log.Warningf("calling RegisterCollector() errored out, error: %v", err)
+ }

  r := prometheus.NewRegistry()
  r.MustRegister(exporter)
@@ -69,7 +72,7 @@ func newServer(opt *Opt, r *prometheus.Registry) *http.Server {
  exporterMux := http.NewServeMux()
  exporterMux.Handle(opt.MetricsPath, promhttp.Handler())
  exporterMux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
- w.Write([]byte(`<html>
+ _, _ = w.Write([]byte(`<html>
  <head><title>Harbor Exporter</title></head>
  <body>
  <h1>Harbor Exporter</h1>
@@ -69,7 +69,11 @@ func (hc *HealthCollector) getHealthStatus() []prometheus.Metric {
  }
  defer res.Body.Close()
  var healthResponse responseHealth
- json.NewDecoder(res.Body).Decode(&healthResponse)
+ err = json.NewDecoder(res.Body).Decode(&healthResponse)
+ if err != nil {
+ log.Errorf("failed to decode res.Body into healthResponse, error: %v", err)
+ return result
+ }
  result = append(result, harborHealth.MustNewConstMetric(healthy(healthResponse.Status)))
  for _, v := range healthResponse.Components {
  result = append(result, harborComponentsHealth.MustNewConstMetric(healthy(v.Status), v.Name))
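The collector now checks the json.Decoder.Decode error and returns the partial result after logging, instead of building metrics from an empty struct. A small sketch of that guard with illustrative types (not the exporter's real ones):

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"strings"
)

type health struct {
	Status string `json:"status"`
}

// parseHealth checks the Decode error and bails out early, as the collector
// now does; the names here are hypothetical.
func parseHealth(body string) []string {
	var result []string
	var h health
	if err := json.NewDecoder(strings.NewReader(body)).Decode(&h); err != nil {
		log.Printf("failed to decode health response: %v", err)
		return result
	}
	return append(result, h.Status)
}

func main() {
	fmt.Println(parseHealth(`{"status":"healthy"}`))
	fmt.Println(parseHealth(`not json`))
}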
@@ -140,7 +140,10 @@ type artifactInfo struct {

  func getQuotaValue(q string) float64 {
  var quota quotaType
- json.Unmarshal([]byte(q), &quota)
+ err := json.Unmarshal([]byte(q), &quota)
+ if err != nil {
+ log.Warningf("failed to unmarshal data into quotaType, error: %v", err)
+ }
  return quota.Storage
  }

@@ -68,7 +68,11 @@ func (hc *SystemInfoCollector) getSysInfo() []prometheus.Metric {
  }
  defer res.Body.Close()
  var sysInfoResponse responseSysInfo
- json.NewDecoder(res.Body).Decode(&sysInfoResponse)
+ err = json.NewDecoder(res.Body).Decode(&sysInfoResponse)
+ if err != nil {
+ log.Errorf("failed to decode res.Body into sysInfoResponse, error: %v", err)
+ return result
+ }
  result = append(result, harborSysInfo.MustNewConstMetric(1,
  sysInfoResponse.AuthMode,
  sysInfoResponse.HarborVersion,
@@ -129,7 +129,10 @@ func (d *dao) GetTotalOfProjectMembers(ctx context.Context, projectID int64, que
  if err != nil {
  return 0, err
  }
- o.Raw(sql, queryParam).QueryRow(&count)
+ err = o.Raw(sql, queryParam).QueryRow(&count)
+ if err != nil {
+ return 0, err
+ }
  return count, err
  }

@@ -120,7 +120,7 @@ type Trigger struct {
  // Valid the policy
  func (s *Schema) Valid(v *validation.Validation) {
  if len(s.Name) == 0 {
- v.SetError("name", "cannot be empty")
+ _ = v.SetError("name", "cannot be empty")
  }

  // valid the filters
@@ -129,30 +129,30 @@ func (s *Schema) Valid(v *validation.Validation) {
  case FilterTypeRepository, FilterTypeTag, FilterTypeVulnerability:
  _, ok := filter.Value.(string)
  if !ok {
- v.SetError("filters", "the type of filter value isn't string")
+ _ = v.SetError("filters", "the type of filter value isn't string")
  break
  }
  case FilterTypeSignature:
  _, ok := filter.Value.(bool)
  if !ok {
- v.SetError("filers", "the type of signature filter value isn't bool")
+ _ = v.SetError("filers", "the type of signature filter value isn't bool")
  break
  }
  case FilterTypeLabel:
  labels, ok := filter.Value.([]interface{})
  if !ok {
- v.SetError("filters", "the type of label filter value isn't string slice")
+ _ = v.SetError("filters", "the type of label filter value isn't string slice")
  break
  }
  for _, label := range labels {
  _, ok := label.(string)
  if !ok {
- v.SetError("filters", "the type of label filter value isn't string slice")
+ _ = v.SetError("filters", "the type of label filter value isn't string slice")
  break
  }
  }
  default:
- v.SetError("filters", "invalid filter type")
+ _ = v.SetError("filters", "invalid filter type")
  break
  }
  }
@@ -163,15 +163,15 @@ func (s *Schema) Valid(v *validation.Validation) {
  case TriggerTypeManual, TriggerTypeEventBased:
  case TriggerTypeScheduled:
  if len(s.Trigger.Settings.Cron) == 0 {
- v.SetError("trigger", fmt.Sprintf("the cron string cannot be empty when the trigger type is %s", TriggerTypeScheduled))
+ _ = v.SetError("trigger", fmt.Sprintf("the cron string cannot be empty when the trigger type is %s", TriggerTypeScheduled))
  } else {
  _, err := utils.CronParser().Parse(s.Trigger.Settings.Cron)
  if err != nil {
- v.SetError("trigger", fmt.Sprintf("invalid cron string for scheduled trigger: %s", s.Trigger.Settings.Cron))
+ _ = v.SetError("trigger", fmt.Sprintf("invalid cron string for scheduled trigger: %s", s.Trigger.Settings.Cron))
  }
  }
  default:
- v.SetError("trigger", "invalid trigger type")
+ _ = v.SetError("trigger", "invalid trigger type")
  }
  }
  }
@@ -55,7 +55,7 @@ func listConditions(query *q.Query) (string, []interface{}) {

  bytes, err := json.Marshal(query.Keywords)
  if err == nil {
- json.Unmarshal(bytes, &q)
+ _ = json.Unmarshal(bytes, &q)
  }

  if q.ID != 0 {
@@ -352,7 +352,10 @@ func (a *adapter) getTags(repo aliRepo, c *cr.Client) (tags []string, err error)
  }

  var resp = &aliTagResp{}
- json.Unmarshal(tagsResp.GetHttpContentBytes(), resp)
+ err = json.Unmarshal(tagsResp.GetHttpContentBytes(), resp)
+ if err != nil {
+ return
+ }
  for _, tag := range resp.Data.Tags {
  tags = append(tags, tag.Tag)
  }
@@ -57,7 +57,10 @@ func (a *aliyunAuthCredential) Modify(r *http.Request) (err error) {
  return
  }
  var v authorizationToken
- json.Unmarshal(tokenResponse.GetHttpContentBytes(), &v)
+ err = json.Unmarshal(tokenResponse.GetHttpContentBytes(), &v)
+ if err != nil {
+ return
+ }
  a.cacheTokenExpiredAt = v.Data.ExpireDate.ToTime()
  a.cacheToken.user = v.Data.TempUserName
  a.cacheToken.password = v.Data.AuthorizationToken
@@ -112,7 +112,10 @@ func (c *Client) checkHealthy() error {
  }
  defer resp.Body.Close()

- ioutil.ReadAll(resp.Body)
+ _, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return err
+ }
  if resp.StatusCode >= 200 && resp.StatusCode < 300 {
  return nil
  }
@@ -104,7 +104,10 @@ func (c *Client) checkHealthy() error {
  }
  defer resp.Body.Close()

- ioutil.ReadAll(resp.Body)
+ _, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return err
+ }
  if resp.StatusCode >= 200 && resp.StatusCode < 300 {
  return nil
  }
@@ -195,7 +195,7 @@ func (j *Job) Run(ctx job.Context, params job.Parameters) error {
  authorization, err = makeBasicAuthorization(robotAccount)
  }
  if err != nil {
- logAndWrapError(myLogger, err, "scan job: make authorization")
+ _ = logAndWrapError(myLogger, err, "scan job: make authorization")
  }

  if shouldStop() {
@@ -84,7 +84,7 @@ func AfterResponse(hook func(http.ResponseWriter, *http.Request, int) error, ski
  next.ServeHTTP(res, r)

  if err := hook(res, r, res.StatusCode()); err != nil {
- res.Reset()
+ _ = res.Reset()
  lib_http.SendError(res, err)
  }
  }, skippers...)
@@ -167,7 +167,7 @@ func RequestMiddleware(config RequestConfig, skippers ...middleware.Skipper) fun
  }
  }

- res.Reset()
+ _ = res.Reset()

  var errs quota.Errors
  if errors.As(err, &errs) {
@@ -132,7 +132,10 @@ func handleManifest(w http.ResponseWriter, r *http.Request, next http.Handler) e
  w.Header().Set(dockerContentDigest, man.Digest)
  w.Header().Set(etag, man.Digest)
  if r.Method == http.MethodGet {
- w.Write(man.Content)
+ _, err = w.Write(man.Content)
+ if err != nil {
+ return err
+ }
  }
  return nil
  }
@@ -89,7 +89,7 @@ func getManifest(w http.ResponseWriter, req *http.Request) {
  buffer.WriteHeader(http.StatusOK)
  // write data from cache, no need to write body if is head request
  if req.Method == http.MethodGet {
- buffer.Write(manifest)
+ _, _ = buffer.Write(manifest)
  }
  } else {
  log.Warningf("failed to get manifest from cache, error: %v", err)
@@ -36,6 +36,7 @@ import (
  "github.com/goharbor/harbor/src/controller/tag"
  "github.com/goharbor/harbor/src/lib"
  "github.com/goharbor/harbor/src/lib/errors"
+ "github.com/goharbor/harbor/src/lib/log"
  "github.com/goharbor/harbor/src/pkg/accessory"
  "github.com/goharbor/harbor/src/pkg/notification"
  "github.com/goharbor/harbor/src/pkg/scan/report"
@@ -107,7 +108,7 @@ func (a *artifactAPI) ListArtifacts(ctx context.Context, params operation.ListAr
  for _, art := range arts {
  artifact := &model.Artifact{}
  artifact.Artifact = *art
- assembler.WithArtifacts(artifact).Assemble(ctx)
+ _ = assembler.WithArtifacts(artifact).Assemble(ctx)
  artifacts = append(artifacts, artifact.ToSwagger())
  }

@@ -133,7 +134,10 @@ func (a *artifactAPI) GetArtifact(ctx context.Context, params operation.GetArtif
  art := &model.Artifact{}
  art.Artifact = *artifact

- assembler.NewVulAssembler(lib.BoolValue(params.WithScanOverview), parseScanReportMimeTypes(params.XAcceptVulnerabilities)).WithArtifacts(art).Assemble(ctx)
+ err = assembler.NewVulAssembler(lib.BoolValue(params.WithScanOverview), parseScanReportMimeTypes(params.XAcceptVulnerabilities)).WithArtifacts(art).Assemble(ctx)
+ if err != nil {
+ log.Warningf("failed to assemble vulnerabilities with artifact, error: %v", err)
+ }

  return operation.NewGetArtifactOK().WithPayload(art.ToSwagger())
  }
@@ -423,7 +427,7 @@ func (a *artifactAPI) GetVulnerabilitiesAddition(ctx context.Context, params ope

  return middleware.ResponderFunc(func(w http.ResponseWriter, p runtime.Producer) {
  w.Header().Set("Content-Type", "application/json")
- w.Write(content)
+ _, _ = w.Write(content)
  })
  }

@@ -444,7 +448,7 @@ func (a *artifactAPI) GetAddition(ctx context.Context, params operation.GetAddit

  return middleware.ResponderFunc(func(w http.ResponseWriter, p runtime.Producer) {
  w.Header().Set("Content-Type", addition.ContentType)
- w.Write(addition.Content)
+ _, _ = w.Write(addition.Content)
  })
  }

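For HTTP responders the commit discards the result of ResponseWriter.Write with `_, _ =`: once the status and headers are sent there is no useful recovery from a failed body write. A minimal, hypothetical handler showing the same choice:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// handler discards the Write result explicitly: after the headers are sent,
// a failed body write has no meaningful recovery path.
func handler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	_, _ = w.Write([]byte(`{"ok":true}`))
}

func main() {
	rec := httptest.NewRecorder()
	handler(rec, httptest.NewRequest(http.MethodGet, "/", nil))
	fmt.Println(rec.Code, rec.Body.String())
}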
@@ -9,6 +9,7 @@ import (
  "github.com/goharbor/harbor/src/controller/immutable"
  "github.com/goharbor/harbor/src/controller/project"
  "github.com/goharbor/harbor/src/lib"
+ "github.com/goharbor/harbor/src/lib/log"
  "github.com/goharbor/harbor/src/pkg/immutable/model"
  handler_model "github.com/goharbor/harbor/src/server/v2.0/handler/model"
  "github.com/goharbor/harbor/src/server/v2.0/models"
@@ -36,7 +37,9 @@ func (ia *immutableAPI) CreateImmuRule(ctx context.Context, params operation.Cre
  }

  metadata := model.Metadata{}
- lib.JSONCopy(&metadata, params.ImmutableRule)
+ if err := lib.JSONCopy(&metadata, params.ImmutableRule); err != nil {
+ log.Warningf("failed to call JSONCopy into Metadata of the immutable rule when CreateImmuRule, error: %v", err)
+ }

  projectID, err := ia.getProjectID(ctx, projectNameOrID)
  if err != nil {
@@ -73,7 +76,9 @@ func (ia *immutableAPI) UpdateImmuRule(ctx context.Context, params operation.Upd
  }

  metadata := model.Metadata{}
- lib.JSONCopy(&metadata, params.ImmutableRule)
+ if err := lib.JSONCopy(&metadata, params.ImmutableRule); err != nil {
+ log.Warningf("failed to call JSONCopy into Metadata of the immutable rule when UpdateImmuRule, error: %v", err)
+ }

  projectID, err := ia.getProjectID(ctx, projectNameOrID)
  if err != nil {
@@ -11,6 +11,7 @@ import (
  "github.com/goharbor/harbor/src/controller/project"
  "github.com/goharbor/harbor/src/lib"
  "github.com/goharbor/harbor/src/lib/errors"
+ "github.com/goharbor/harbor/src/lib/log"
  "github.com/goharbor/harbor/src/lib/q"
  "github.com/goharbor/harbor/src/pkg/label"
  pkg_model "github.com/goharbor/harbor/src/pkg/label/model"
@@ -35,7 +36,9 @@ type labelAPI struct {

  func (lAPI *labelAPI) CreateLabel(ctx context.Context, params operation.CreateLabelParams) middleware.Responder {
  label := &pkg_model.Label{}
- lib.JSONCopy(label, params.Label)
+ if err := lib.JSONCopy(label, params.Label); err != nil {
+ log.Warningf("failed to call JSONCopy on Label when CreateLabel, error: %v", err)
+ }

  label.Level = common.LabelLevelUser
  if label.Scope == common.LabelScopeGlobal {
@@ -122,7 +125,9 @@ func (lAPI *labelAPI) ListLabels(ctx context.Context, params operation.ListLabel

  func (lAPI *labelAPI) UpdateLabel(ctx context.Context, params operation.UpdateLabelParams) middleware.Responder {
  labelData := &pkg_model.Label{}
- lib.JSONCopy(labelData, params.Label)
+ if err := lib.JSONCopy(labelData, params.Label); err != nil {
+ log.Warningf("failed to call JSONCopy on Label when UpdateLabel, error: %v", err)
+ }

  label, err := lAPI.labelMgr.Get(ctx, params.LabelID)
  if err != nil {
@@ -40,7 +40,10 @@ func (p *Project) ToSwagger() *models.Project {
  var md *models.ProjectMetadata
  if p.Metadata != nil {
  var m models.ProjectMetadata
- lib.JSONCopy(&m, p.Metadata)
+ err := lib.JSONCopy(&m, p.Metadata)
+ if err != nil {
+ log.Warningf("failed to copy Metadata %T, error: %v", p.Metadata, err)
+ }

  // Transform the severity to severity of CVSS v3.0 Ratings
  if m.Severity != nil {
@@ -3,6 +3,7 @@ package model
  import (
  "encoding/json"
  "github.com/goharbor/harbor/src/lib"
+ "github.com/goharbor/harbor/src/lib/log"
  "github.com/goharbor/harbor/src/pkg/retention"
  "github.com/goharbor/harbor/src/pkg/retention/policy"
  "github.com/goharbor/harbor/src/server/v2.0/models"
@@ -16,7 +17,9 @@ type RetentionPolicy struct {
  // ToSwagger ...
  func (s *RetentionPolicy) ToSwagger() *models.RetentionPolicy {
  var result models.RetentionPolicy
- lib.JSONCopy(&result, s)
+ if err := lib.JSONCopy(&result, s); err != nil {
+ log.Warningf("failed to do JSONCopy on RetentionPolicy, error: %v", err)
+ }
  return &result
  }

@@ -47,7 +50,9 @@ type RetentionExec struct {
  // ToSwagger ...
  func (e *RetentionExec) ToSwagger() *models.RetentionExecution {
  var result models.RetentionExecution
- lib.JSONCopy(&result, e)
+ if err := lib.JSONCopy(&result, e); err != nil {
+ log.Warningf("failed to do JSONCopy on RetentionExecution, error: %v", err)
+ }
  return &result
  }

@@ -64,7 +69,9 @@ type RetentionTask struct {
  // ToSwagger ...
  func (e *RetentionTask) ToSwagger() *models.RetentionExecutionTask {
  var result models.RetentionExecutionTask
- lib.JSONCopy(&result, e)
+ if err := lib.JSONCopy(&result, e); err != nil {
+ log.Warningf("failed to do JSONCopy on RetentionExecutionTask, error: %v", err)
+ }
  return &result
  }

@@ -4,6 +4,7 @@ import (
  "github.com/go-openapi/strfmt"
  "github.com/goharbor/harbor/src/controller/robot"
  "github.com/goharbor/harbor/src/lib"
+ "github.com/goharbor/harbor/src/lib/log"
  "github.com/goharbor/harbor/src/server/v2.0/models"
  )

@@ -17,7 +18,9 @@ func (r *Robot) ToSwagger() *models.Robot {
  perms := []*models.RobotPermission{}
  for _, p := range r.Permissions {
  temp := &models.RobotPermission{}
- lib.JSONCopy(temp, p)
+ if err := lib.JSONCopy(temp, p); err != nil {
+ log.Warningf("failed to do JSONCopy on RobotPermission, error: %v", err)
+ }
  perms = append(perms, temp)
  }

@@ -9,6 +9,7 @@ import (
  "github.com/goharbor/harbor/src/common/utils"
  "github.com/goharbor/harbor/src/lib"
  "github.com/goharbor/harbor/src/lib/errors"
+ "github.com/goharbor/harbor/src/lib/log"
  "github.com/goharbor/harbor/src/lib/q"
  "github.com/goharbor/harbor/src/pkg/notification"
  "github.com/goharbor/harbor/src/pkg/notification/job"
@@ -83,7 +84,9 @@ func (n *notificationPolicyAPI) CreateWebhookPolicyOfProject(ctx context.Context
  }

  policy := &policy_model.Policy{}
- lib.JSONCopy(policy, params.Policy)
+ if err := lib.JSONCopy(policy, params.Policy); err != nil {
+ log.Warningf("failed to call JSONCopy on notification policy when CreateWebhookPolicyOfProject, error: %v", err)
+ }

  if ok, err := n.validateEventTypes(policy); !ok {
  return n.SendError(ctx, err)
@@ -113,7 +116,9 @@ func (n *notificationPolicyAPI) UpdateWebhookPolicyOfProject(ctx context.Context
  }

  policy := &policy_model.Policy{}
- lib.JSONCopy(policy, params.Policy)
+ if err := lib.JSONCopy(policy, params.Policy); err != nil {
+ log.Warningf("failed to call JSONCopy on notification policy when UpdateWebhookPolicyOfProject, error: %v", err)
+ }

  if ok, err := n.validateEventTypes(policy); !ok {
  return n.SendError(ctx, err)
@@ -204,7 +204,9 @@ func (a *projectAPI) CreateProject(ctx context.Context, params operation.CreateP
  OwnerID: ownerID,
  RegistryID: lib.Int64Value(req.RegistryID),
  }
- lib.JSONCopy(&p.Metadata, req.Metadata)
+ if err := lib.JSONCopy(&p.Metadata, req.Metadata); err != nil {
+ log.Warningf("failed to call JSONCopy on project metadata when CreateProject, error: %v", err)
+ }

  projectID, err := a.projectCtl.Create(ctx, p)
  if err != nil {
@@ -550,7 +552,9 @@ func (a *projectAPI) UpdateProject(ctx context.Context, params operation.UpdateP
  if params.Project.Metadata != nil && p.IsProxy() {
  params.Project.Metadata.EnableContentTrust = nil
  }
- lib.JSONCopy(&p.Metadata, params.Project.Metadata)
+ if err := lib.JSONCopy(&p.Metadata, params.Project.Metadata); err != nil {
+ log.Warningf("failed to call JSONCopy on project metadata when UpdateProject, error: %v", err)
+ }

  if err := a.projectCtl.Update(ctx, p); err != nil {
  return a.SendError(ctx, err)
@@ -801,7 +805,9 @@ func getProjectRegistrySummary(ctx context.Context, p *project.Project, summary
  log.Warningf("failed to get registry %d: %v", p.RegistryID, err)
  } else if registry != nil {
  registry.Credential = nil
- lib.JSONCopy(&summary.Registry, registry)
+ if err := lib.JSONCopy(&summary.Registry, registry); err != nil {
+ log.Warningf("failed to call JSONCopy on project registry summary, error: %v", err)
+ }
  }
  }

@@ -11,6 +11,7 @@ import (
  "github.com/goharbor/harbor/src/lib"
  "github.com/goharbor/harbor/src/lib/config"
  "github.com/goharbor/harbor/src/lib/errors"
+ "github.com/goharbor/harbor/src/lib/log"
  pkg "github.com/goharbor/harbor/src/pkg/robot/model"
  "github.com/goharbor/harbor/src/server/v2.0/handler/model"
  "github.com/goharbor/harbor/src/server/v2.0/models"
@@ -55,7 +56,9 @@ func (rAPI *robotAPI) CreateRobot(ctx context.Context, params operation.CreateRo
  Level: params.Robot.Level,
  }

- lib.JSONCopy(&r.Permissions, params.Robot.Permissions)
+ if err := lib.JSONCopy(&r.Permissions, params.Robot.Permissions); err != nil {
+ log.Warningf("failed to call JSONCopy on robot permission when CreateRobot, error: %v", err)
+ }

  rid, pwd, err := rAPI.robotCtl.Create(ctx, r)
  if err != nil {
@@ -307,7 +310,9 @@ func (rAPI *robotAPI) updateV2Robot(ctx context.Context, params operation.Update
  r.Description = params.Robot.Description
  r.Disabled = params.Robot.Disable
  if len(params.Robot.Permissions) != 0 {
- lib.JSONCopy(&r.Permissions, params.Robot.Permissions)
+ if err := lib.JSONCopy(&r.Permissions, params.Robot.Permissions); err != nil {
+ log.Warningf("failed to call JSONCopy on robot permission when updateV2Robot, error: %v", err)
+ }
  }

  if err := rAPI.robotCtl.Update(ctx, r, &robot.Option{
@@ -255,7 +255,9 @@ func (rAPI *robotV1API) validate(ctx context.Context, params operation.CreateRob

  for _, policy := range params.Robot.Access {
  p := &types.Policy{}
- lib.JSONCopy(p, policy)
+ if err := lib.JSONCopy(p, policy); err != nil {
+ log.Warningf("failed to call JSONCopy on robot access policy when validate, error: %v", err)
+ }
  if !mp[p.String()] {
  return errors.New(nil).WithMessage("%s action of %s resource not exist in project %s", policy.Action, policy.Resource, projectNameOrID).WithCode(errors.BadRequestCode)
  }