Support store job log in DB (#6144)
Signed-off-by: peimingming <peimingming@corp.netease.com>
This commit is contained in:
parent c4bf65162c
commit c67fdc40f5
@@ -1,2 +1,12 @@
 ALTER TABLE properties ALTER COLUMN v TYPE varchar(1024);
 DELETE FROM properties where k='scan_all_policy';
+
+create table job_log (
+ log_id SERIAL NOT NULL,
+ job_uuid varchar (64) NOT NULL,
+ creation_time timestamp default CURRENT_TIMESTAMP,
+ content text,
+ primary key (log_id)
+);
+
+CREATE UNIQUE INDEX job_log_uuid ON job_log (job_uuid);
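The unique index on job_uuid is what lets a job's log be written once and then overwritten on retry. Below is a minimal sketch of that upsert issued directly against Postgres with database/sql and lib/pq; the DSN, UUID and content are placeholders, and the commit itself goes through beego orm instead (see the DAO further down).

package main

import (
    "database/sql"
    "log"

    _ "github.com/lib/pq" // Postgres driver, assumed available
)

func main() {
    // Placeholder DSN; Harbor's real connection settings come from its config.
    db, err := sql.Open("postgres", "postgres://user:password@localhost:5432/registry?sslmode=disable")
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    // The unique index on job_uuid allows a retried job to overwrite its previous log.
    _, err = db.Exec(
        `INSERT INTO job_log (job_uuid, content) VALUES ($1, $2)
         ON CONFLICT (job_uuid) DO UPDATE SET content = EXCLUDED.content`,
        "example-job-uuid", "log text ...")
    if err != nil {
        log.Fatal(err)
    }
}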
src/common/dao/joblog.go (new file, 40 lines)
@@ -0,0 +1,40 @@
package dao

import (
    "github.com/goharbor/harbor/src/common/models"
    "github.com/astaxie/beego/orm"
    "time"
)

// CreateOrUpdateJobLog ...
func CreateOrUpdateJobLog(log *models.JobLog) (int64, error) {
    o := GetOrmer()
    count, err := o.InsertOrUpdate(log, "job_uuid")
    if err != nil {
        return 0, err
    }

    return count, nil
}

// GetJobLog ...
func GetJobLog(uuid string) (*models.JobLog, error) {
    o := GetOrmer()
    jl := models.JobLog{UUID: uuid}
    err := o.Read(&jl, "UUID")
    if err == orm.ErrNoRows {
        return nil, err
    }
    return &jl, nil
}

// DeleteJobLogsBefore ...
func DeleteJobLogsBefore(t time.Time) (int64, error) {
    o := GetOrmer()
    sql := `delete from job_log where creation_time < ?`
    res, err := o.Raw(sql, t).Exec()
    if err != nil {
        return 0, err
    }
    return res.RowsAffected()
}
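A usage sketch of the three new DAO helpers. It assumes dao.InitDatabase has already been called with a reachable Postgres configuration; the UUID and the 7-day retention window are only illustrative.

package main

import (
    "fmt"
    "log"
    "time"

    "github.com/goharbor/harbor/src/common/dao"
    "github.com/goharbor/harbor/src/common/models"
)

func main() {
    // Upsert a job's log; InsertOrUpdate keys on the unique job_uuid column.
    if _, err := dao.CreateOrUpdateJobLog(&models.JobLog{
        UUID:    "example-job-uuid", // placeholder job UUID
        Content: "first line\nsecond line\n",
    }); err != nil {
        log.Fatal(err)
    }

    // Read it back by UUID.
    jl, err := dao.GetJobLog("example-job-uuid")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(jl.Content)

    // Drop logs older than an illustrative 7-day retention window.
    removed, err := dao.DeleteJobLogsBefore(time.Now().AddDate(0, 0, -7))
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("removed %d rows\n", removed)
}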
@@ -36,5 +36,6 @@ func init() {
 		new(Label),
 		new(ResourceLabel),
 		new(UserGroup),
-		new(AdminJob))
+		new(AdminJob),
+		new(JobLog))
 }
src/common/models/joblog.go (new file, 23 lines)
@@ -0,0 +1,23 @@
package models

import (
    "time"
)

// JobLogTable is the name of the table that records the job execution result.
const JobLogTable = "job_log"

// JobLog holds information about logs which are used to record the result of execution of a job.
type JobLog struct {
    LogID        int       `orm:"pk;auto;column(log_id)" json:"log_id"`
    UUID         string    `orm:"column(job_uuid)" json:"uuid"`
    CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"`
    Content      string    `orm:"column(content)" json:"content"`
}

// TableName is required by beego orm to map JobLog to table job_log
func (a *JobLog) TableName() string {
    return JobLogTable
}
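For reference, a small sketch of how the json tags shape the serialized form of a JobLog; all field values here are made up.

package main

import (
    "encoding/json"
    "fmt"
    "time"

    "github.com/goharbor/harbor/src/common/models"
)

func main() {
    jl := models.JobLog{
        LogID:        1,
        UUID:         "example-job-uuid",
        CreationTime: time.Now(),
        Content:      "job output ...",
    }
    // The json tags surface the fields as log_id, uuid, creation_time and content.
    b, err := json.MarshalIndent(jl, "", "  ")
    if err != nil {
        panic(err)
    }
    fmt.Println(string(b))
}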
@@ -31,6 +31,7 @@ import (
 	"github.com/goharbor/harbor/src/jobservice/job"
 	"github.com/goharbor/harbor/src/jobservice/logger"
 	jmodel "github.com/goharbor/harbor/src/jobservice/models"
+	"github.com/goharbor/harbor/src/jobservice/logger/sweeper"
 )

 const (
@@ -95,7 +96,14 @@ func (c *Context) Init() error {

 	db := getDBFromConfig(configs)

-	return dao.InitDatabase(db)
+	err = dao.InitDatabase(db)
+	if err != nil {
+		return err
+	}
+
+	// Initialize DB finished
+	initDBCompleted()
+	return nil
 }

 // Build implements the same method in env.JobContext interface
@@ -237,22 +245,28 @@ func setLoggers(setter func(lg logger.Interface), jobID string) error {
 	lOptions := []logger.Option{}
 	for _, lc := range config.DefaultConfig.JobLoggerConfigs {
 		// For running job, the depth should be 5
-		if lc.Name == logger.LoggerNameFile || lc.Name == logger.LoggerNameStdOutput {
+		if lc.Name == logger.LoggerNameFile || lc.Name == logger.LoggerNameStdOutput || lc.Name == logger.LoggerNameDB {
 			if lc.Settings == nil {
 				lc.Settings = map[string]interface{}{}
 			}
 			lc.Settings["depth"] = 5
 		}
-		if lc.Name == logger.LoggerNameFile {
+		if lc.Name == logger.LoggerNameFile || lc.Name == logger.LoggerNameDB {
 			// Need extra param
 			fSettings := map[string]interface{}{}
 			for k, v := range lc.Settings {
 				// Copy settings
 				fSettings[k] = v
 			}
-			// Append file name param
-			fSettings["filename"] = fmt.Sprintf("%s.log", jobID)
-			lOptions = append(lOptions, logger.BackendOption(lc.Name, lc.Level, fSettings))
+			if lc.Name == logger.LoggerNameFile {
+				// Append file name param
+				fSettings["filename"] = fmt.Sprintf("%s.log", jobID)
+				lOptions = append(lOptions, logger.BackendOption(lc.Name, lc.Level, fSettings))
+			} else { // DB Logger
+				// Append DB key
+				fSettings["key"] = jobID
+				lOptions = append(lOptions, logger.BackendOption(lc.Name, lc.Level, fSettings))
+			}
 		} else {
 			lOptions = append(lOptions, logger.BackendOption(lc.Name, lc.Level, lc.Settings))
 		}
@@ -267,3 +281,8 @@ func setLoggers(setter func(lg logger.Interface), jobID string) error {

 	return nil
 }
+
+func initDBCompleted() error {
+	sweeper.PrepareDBSweep()
+	return nil
+}
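To make the per-job wiring concrete: file loggers are keyed by a per-job file name, while the DB logger is keyed by the job UUID via the "key" setting, which later becomes the job_uuid column. The sketch below uses a hypothetical helper (not code from the commit) that mirrors the branch added to setLoggers.

package main

import (
    "fmt"

    "github.com/goharbor/harbor/src/jobservice/logger"
)

// buildJobLoggerOption is a hypothetical helper mirroring the setLoggers change:
// FILE loggers get a per-job "filename" setting, DB loggers get a "key" setting
// so the flushed log can later be looked up by the job UUID.
func buildJobLoggerOption(name, level, jobID string, settings map[string]interface{}) logger.Option {
    s := map[string]interface{}{}
    for k, v := range settings {
        s[k] = v // copy shared settings such as "depth"
    }
    switch name {
    case logger.LoggerNameFile:
        s["filename"] = fmt.Sprintf("%s.log", jobID)
    case logger.LoggerNameDB:
        s["key"] = jobID
    }
    return logger.BackendOption(name, level, s)
}

func main() {
    _ = buildJobLoggerOption(logger.LoggerNameDB, "INFO", "example-job-uuid", map[string]interface{}{"depth": 5})
}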
src/jobservice/logger/backend/db_logger.go (new file, 105 lines)
@@ -0,0 +1,105 @@
package backend

import (
    "github.com/goharbor/harbor/src/common/utils/log"
    "bufio"
    "bytes"
    "github.com/goharbor/harbor/src/common/models"
    "github.com/goharbor/harbor/src/common/dao"
)

// DBLogger is an implementation of logger.Interface.
// It outputs logs to PGSql.
type DBLogger struct {
    backendLogger *log.Logger
    bw            *bufio.Writer
    buffer        *bytes.Buffer
    key           string
}

// NewDBLogger creates a new DB logger
// nil might be returned
func NewDBLogger(key string, level string, depth int) (*DBLogger, error) {
    buffer := bytes.NewBuffer(make([]byte, 0))
    bw := bufio.NewWriter(buffer)
    logLevel := parseLevel(level)

    backendLogger := log.New(bw, log.NewTextFormatter(), logLevel, depth)

    return &DBLogger{
        backendLogger: backendLogger,
        bw:            bw,
        buffer:        buffer,
        key:           key,
    }, nil
}

// Close the opened io stream and flush data into DB
// Implements logger.Closer interface
func (dbl *DBLogger) Close() error {
    err := dbl.bw.Flush()
    if err != nil {
        return err
    }

    jobLog := models.JobLog{
        UUID:    dbl.key,
        Content: dbl.buffer.String(),
    }

    _, err = dao.CreateOrUpdateJobLog(&jobLog)
    if err != nil {
        return err
    }
    return nil
}

// Debug ...
func (dbl *DBLogger) Debug(v ...interface{}) {
    dbl.backendLogger.Debug(v...)
}

// Debugf with format
func (dbl *DBLogger) Debugf(format string, v ...interface{}) {
    dbl.backendLogger.Debugf(format, v...)
}

// Info ...
func (dbl *DBLogger) Info(v ...interface{}) {
    dbl.backendLogger.Info(v...)
}

// Infof with format
func (dbl *DBLogger) Infof(format string, v ...interface{}) {
    dbl.backendLogger.Infof(format, v...)
}

// Warning ...
func (dbl *DBLogger) Warning(v ...interface{}) {
    dbl.backendLogger.Warning(v...)
}

// Warningf with format
func (dbl *DBLogger) Warningf(format string, v ...interface{}) {
    dbl.backendLogger.Warningf(format, v...)
}

// Error ...
func (dbl *DBLogger) Error(v ...interface{}) {
    dbl.backendLogger.Error(v...)
}

// Errorf with format
func (dbl *DBLogger) Errorf(format string, v ...interface{}) {
    dbl.backendLogger.Errorf(format, v...)
}

// Fatal error
func (dbl *DBLogger) Fatal(v ...interface{}) {
    dbl.backendLogger.Fatal(v...)
}

// Fatalf error
func (dbl *DBLogger) Fatalf(format string, v ...interface{}) {
    dbl.backendLogger.Fatalf(format, v...)
}
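A sketch of the DB logger's life cycle, assuming dao.InitDatabase has already run so the flush in Close() can reach the job_log table; the UUID, level string and depth are illustrative.

package main

import (
    "log"

    "github.com/goharbor/harbor/src/jobservice/logger/backend"
)

func main() {
    // The key is the job UUID; it becomes the job_uuid column when Close() flushes.
    dbl, err := backend.NewDBLogger("example-job-uuid", "INFO", 4)
    if err != nil {
        log.Fatal(err)
    }

    dbl.Info("job started")
    dbl.Infof("step %d done", 1)

    // Close flushes the buffered text into the job_log row keyed by the UUID.
    if err := dbl.Close(); err != nil {
        log.Fatal(err)
    }
}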
src/jobservice/logger/backend/db_logger_test.go (new file, 1 line)
@@ -0,0 +1 @@
package backend
@@ -82,3 +82,13 @@ func (e *Entry) Fatalf(format string, v ...interface{}) {
 		l.Fatalf(format, v...)
 	}
 }
+
+// Close logger
+func (e *Entry) Close() error {
+	for _, l := range e.loggers {
+		if closer, ok := l.(Closer); ok {
+			closer.Close()
+		}
+	}
+	return nil
+}
@@ -62,3 +62,24 @@ func StdFactory(options ...OptionItem) (Interface, error) {

 	return backend.NewStdOutputLogger(level, output, depth), nil
 }
+
+// DBFactory is factory of DB logger
+func DBFactory(options ...OptionItem) (Interface, error) {
+	var (
+		level, key string
+		depth      int
+	)
+	for _, op := range options {
+		switch op.Field() {
+		case "level":
+			level = op.String()
+		case "key":
+			key = op.String()
+		case "depth":
+			depth = op.Int()
+		default:
+		}
+	}
+
+	return backend.NewDBLogger(key, level, depth)
+}
src/jobservice/logger/getter/db_getter.go (new file, 28 lines)
@@ -0,0 +1,28 @@
package getter

import (
    "errors"
    "github.com/goharbor/harbor/src/common/dao"
)

// DBGetter is responsible for retrieving DB log data
type DBGetter struct {
}

// NewDBGetter is constructor of DBGetter
func NewDBGetter() *DBGetter {
    return &DBGetter{}
}

// Retrieve implements @Interface.Retrieve
func (dbg *DBGetter) Retrieve(logID string) ([]byte, error) {
    if len(logID) == 0 {
        return nil, errors.New("empty log identify")
    }

    jobLog, err := dao.GetJobLog(logID)
    if err != nil {
        return nil, err
    }

    return []byte(jobLog.Content), nil
}
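Reading a stored log back is symmetrical. A sketch assuming dao.InitDatabase has run and a job_log row already exists for the placeholder UUID.

package main

import (
    "fmt"
    "log"

    "github.com/goharbor/harbor/src/jobservice/logger/getter"
)

func main() {
    g := getter.NewDBGetter()
    // The log ID for the DB getter is the job UUID that was used as the logger key.
    data, err := g.Retrieve("example-job-uuid")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(string(data))
}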
@@ -25,3 +25,8 @@ func FileGetterFactory(options ...OptionItem) (getter.Interface, error) {

 	return getter.NewFileGetter(baseDir), nil
 }
+
+// DBGetterFactory creates a getter for the DB logger
+func DBGetterFactory(options ...OptionItem) (getter.Interface, error) {
+	return getter.NewDBGetter(), nil
+}
@@ -7,6 +7,8 @@ const (
 	LoggerNameFile = "FILE"
 	// LoggerNameStdOutput is the unique name of the std logger.
 	LoggerNameStdOutput = "STD_OUTPUT"
+	// LoggerNameDB is the unique name of the DB logger.
+	LoggerNameDB = "DB"
 )

 // Declaration is used to declare a supported logger.
@@ -28,6 +30,8 @@ var knownLoggers = map[string]*Declaration{
 	LoggerNameFile: {FileFactory, FileSweeperFactory, FileGetterFactory, false},
 	// STD output(both stdout and stderr) logger
 	LoggerNameStdOutput: {StdFactory, nil, nil, true},
+	// DB logger
+	LoggerNameDB: {DBFactory, DBSweeperFactory, DBGetterFactory, false},
 }

 // IsKnownLogger checks if the logger is supported with name.
src/jobservice/logger/sweeper/db_sweeper.go (new file, 48 lines)
@@ -0,0 +1,48 @@
package sweeper

import (
    "time"
    "fmt"
    "github.com/goharbor/harbor/src/common/dao"
)

var dbInit = make(chan int, 1)

// DBSweeper is used to sweep the DB logs
type DBSweeper struct {
    duration int
}

// NewDBSweeper is constructor of DBSweeper
func NewDBSweeper(duration int) *DBSweeper {
    return &DBSweeper{
        duration: duration,
    }
}

// Sweep logs
func (dbs *DBSweeper) Sweep() (int, error) {
    // DB initialization not completed, waiting
    <-dbInit

    // Start to sweep logs
    before := time.Now().Add(time.Duration(dbs.duration) * oneDay * -1)
    count, err := dao.DeleteJobLogsBefore(before)

    if err != nil {
        return 0, fmt.Errorf("sweep logs in DB failed before %s with error: %s", before, err)
    }

    return int(count), nil
}

// Duration for sweeping
func (dbs *DBSweeper) Duration() int {
    return dbs.duration
}

// PrepareDBSweep prepares the DB sweeping
func PrepareDBSweep() error {
    dbInit <- 1
    return nil
}
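The dbInit channel is how the sweeper waits for the job context to finish database initialization: Sweep blocks on <-dbInit until PrepareDBSweep (called from initDBCompleted above) signals it. A sketch of that hand-off, with an illustrative 7-day retention value and assuming the DB is reachable.

package main

import (
    "fmt"
    "log"

    "github.com/goharbor/harbor/src/jobservice/logger/sweeper"
)

func main() {
    s := sweeper.NewDBSweeper(7) // keep roughly 7 days of job logs (illustrative)

    // Signal that DB initialization is done; the channel is buffered, so this
    // does not block even if Sweep has not started yet.
    sweeper.PrepareDBSweep()

    removed, err := s.Sweep()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("removed %d expired job log rows\n", removed)
}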
@@ -30,3 +30,19 @@ func FileSweeperFactory(options ...OptionItem) (sweeper.Interface, error) {

 	return sweeper.NewFileSweeper(workDir, duration), nil
 }
+
+// DBSweeperFactory creates DB sweeper.
+func DBSweeperFactory(options ...OptionItem) (sweeper.Interface, error) {
+	var duration = 1
+	for _, op := range options {
+		switch op.Field() {
+		case "duration":
+			if op.Int() > 0 {
+				duration = op.Int()
+			}
+		default:
+		}
+	}
+
+	return sweeper.NewDBSweeper(duration), nil
+}