Add cache for exporter

Add timed cache for exporter
The default cache time is 30s; the cleanup job runs every 4 hours.

Signed-off-by: DQ <dengq@vmware.com>
DQ 2020-11-30 19:50:52 +08:00
parent f0db193895
commit d95f22448c
7 changed files with 152 additions and 36 deletions


@@ -2,6 +2,8 @@ HARBOR_EXPORTER_PORT=8080
HARBOR_EXPORTER_METRICS_PATH=/metrics
HARBOR_EXPORTER_METRICS_ENABLED=true
HARBOR_EXPORTER_MAX_REQUESTS=30
HARBOR_EXPORTER_CACHE_TIME=30
HARBOR_EXPORTER_CACHE_CLEAN_INTERVAL=14400
HARBOR_METRIC_NAMESPACE=harbor
HARBOR_METRIC_SUBSYSTEM=exporter
HARBOR_SERVICE_HOST=core
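Both new settings are in seconds: HARBOR_EXPORTER_CACHE_TIME=30 keeps cached collector output for 30s, and HARBOR_EXPORTER_CACHE_CLEAN_INTERVAL=14400 triggers the cleanup pass every 14400s (4 hours), matching the commit description.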


@@ -2,7 +2,6 @@ package main
import (
"net/http"
_ "net/http/pprof"
"os"
"strings"
@@ -56,16 +55,21 @@ func main() {
TLSEnabled: viper.GetBool("exporter.tls_enabled"),
Certificate: viper.GetString("exporter.tls_cert"),
Key: viper.GetString("exporter.tls_key"),
CacheDuration: viper.GetInt64("exporter.cache_time"),
CacheCleanInterval: viper.GetInt64("exporter.cache_clean_interval"),
}
harborExporter := exporter.NewExporter(exporterOpt)
log.Infof("Starting harbor_exporter with port=%v path=%v metrics=%v max_request=%v tls=%v cert=%v key=%v",
log.Infof("Starting harbor_exporter with port=%v path=%v metrics=%v max_request=%v tls=%v cert=%v key=%v cache_time=%v clean_internal=%v",
exporterOpt.Port,
exporterOpt.MetricsPath,
exporterOpt.ExporterMetricsEnabled,
exporterOpt.MaxRequests,
exporterOpt.TLSEnabled,
exporterOpt.Certificate,
exporterOpt.Key)
exporterOpt.Key,
exporterOpt.CacheDuration,
exporterOpt.CacheCleanInterval,
)
prometheus.MustRegister(harborExporter)
if err := harborExporter.ListenAndServe(); err != nil {
log.Errorf("Error starting Harbor expoter %s", err)

src/pkg/exporter/cache.go (new file)

@@ -0,0 +1,88 @@
package exporter
import (
"sync"
"time"
)
var c *cache
type cachedValue struct {
Value interface{}
Expiration int64
}
type cache struct {
CacheDuration int64
store map[string]cachedValue
*sync.RWMutex
}
// CacheGet gets a value from the cache
func CacheGet(key string) (value interface{}, ok bool) {
c.RLock()
v, ok := c.store[key]
c.RUnlock()
if !ok {
return nil, false
}
if time.Now().Unix() > v.Expiration {
c.Lock()
delete(c.store, key)
c.Unlock()
return nil, false
}
return v.Value, true
}
// CachePut puts a value into the cache under the given key
func CachePut(key string, value interface{}) {
c.Lock()
defer c.Unlock()
c.store[key] = cachedValue{
Value: value,
Expiration: time.Now().Unix() + c.CacheDuration,
}
}
// CacheDelete deletes a key from the cache
func CacheDelete(key string) {
c.Lock()
defer c.Unlock()
delete(c.store, key)
}
// StartCacheCleaner runs one cleanup pass and evicts all expired entries
func StartCacheCleaner() {
now := time.Now().Unix() // Expiration is stored in seconds, so compare seconds, not UnixNano
c.Lock()
defer c.Unlock()
for k, v := range c.store {
if v.Expiration < now {
delete(c.store, k)
}
}
}
// CacheEnabled returns whether the cache in the exporter is enabled
func CacheEnabled() bool {
return c != nil
}
// CacheInit initializes the exporter cache and starts the periodic cleanup goroutine
func CacheInit(opt *Opt) {
c = &cache{
CacheDuration: opt.CacheDuration,
store: make(map[string]cachedValue),
RWMutex: &sync.RWMutex{},
}
go func() {
ticker := time.NewTicker(time.Duration(opt.CacheCleanInterval) * time.Second)
for range ticker.C {
StartCacheCleaner()
}
}()
}
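For reference, a minimal usage sketch of this API from inside the exporter package (exampleCacheUsage, the key, and the stored values are illustrative; the TTL and clean interval mirror the env-file defaults; callers must assert the stored type back, as the collectors below do):

// exampleCacheUsage is a hypothetical caller of the cache API above.
func exampleCacheUsage() {
	CacheInit(&Opt{CacheDuration: 30, CacheCleanInterval: 14400})
	CachePut("example-key", []string{"a", "b"})
	if v, ok := CacheGet("example-key"); ok {
		_ = v.([]string) // values are stored as interface{}, so assert back
	}
	CacheDelete("example-key")
}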


@@ -1,7 +1,6 @@
package exporter
import (
"context"
"errors"
"fmt"
"net/http"
@@ -21,17 +20,22 @@ type Opt struct {
TLSEnabled bool
Certificate string
Key string
CacheDuration int64
CacheCleanInterval int64
}
// NewExporter creates an exporter for Harbor with the given configuration
func NewExporter(opt *Opt) *Exporter {
exporter := &Exporter{
Opt: opt,
Collectors: make(map[string]prometheus.Collector),
collectors: make(map[string]prometheus.Collector),
}
exporter.RegisterColletor(healthCollectorName, NewHealthCollect(hbrCli))
exporter.RegisterColletor(systemInfoCollectorName, NewSystemInfoCollector(hbrCli))
exporter.RegisterColletor(ProjectCollectorName, NewProjectCollector())
if opt.CacheDuration > 0 {
CacheInit(opt)
}
exporter.RegisterCollector(healthCollectorName, NewHealthCollect(hbrCli))
exporter.RegisterCollector(systemInfoCollectorName, NewSystemInfoCollector(hbrCli))
exporter.RegisterCollector(ProjectCollectorName, NewProjectCollector())
r := prometheus.NewRegistry()
r.MustRegister(exporter)
exporter.Server = newServer(opt, r)
@@ -42,19 +46,16 @@ func NewExporter(opt *Opt) *Exporter {
// Exporter is a struct for Harbor which can be used to connect to Harbor and collect data
type Exporter struct {
*http.Server
Opt *Opt
ctx context.Context
Collectors map[string]prometheus.Collector
Opt *Opt
collectors map[string]prometheus.Collector
}
// RegisterColletor register a collector to expoter
func (e *Exporter) RegisterColletor(name string, c prometheus.Collector) error {
if _, ok := e.Collectors[name]; ok {
// RegisterCollector registers a collector to the exporter
func (e *Exporter) RegisterCollector(name string, c prometheus.Collector) error {
if _, ok := e.collectors[name]; ok {
return errors.New("Collector name is already registered")
}
e.Collectors[name] = c
e.collectors[name] = c
log.Infof("collector %s registered ...", name)
return nil
}
@@ -80,14 +81,14 @@ func newServer(opt *Opt, r *prometheus.Registry) *http.Server {
// Describe implements prometheus.Collector
func (e *Exporter) Describe(c chan<- *prometheus.Desc) {
for _, v := range e.Collectors {
for _, v := range e.collectors {
v.Describe(c)
}
}
// Collect implements prometheus.Collector
func (e *Exporter) Collect(c chan<- prometheus.Metric) {
for _, v := range e.Collectors {
for _, v := range e.collectors {
v.Collect(c)
}
}


@@ -49,6 +49,12 @@ func (hc *HealthCollector) Collect(c chan<- prometheus.Metric) {
}
}
func (hc *HealthCollector) getHealthStatus() []prometheus.Metric {
if CacheEnabled() {
value, ok := CacheGet(healthCollectorName)
if ok {
return value.([]prometheus.Metric)
}
}
result := []prometheus.Metric{}
res, err := hbrCli.Get(healthURL)
if err != nil {
@@ -62,6 +68,9 @@ func (hc *HealthCollector) getHealthStatus() []prometheus.Metric {
for _, v := range healthResponse.Components {
result = append(result, harborComponentsHealth.MustNewConstMetric(healthy(v.Status), v.Name))
}
if CacheEnabled() {
CachePut(healthCollectorName, result)
}
return result
}
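getSysInfo and getProjectInfo below follow the same cache-aside shape as getHealthStatus above; a hypothetical helper (getOrCompute is not part of this commit, just a sketch of the shared pattern from inside the exporter package):

// getOrCompute generalizes the pattern above: return the cached slice on a
// hit, otherwise compute the metrics and repopulate the cache for the next scrape.
func getOrCompute(key string, compute func() []prometheus.Metric) []prometheus.Metric {
	if CacheEnabled() {
		if v, ok := CacheGet(key); ok {
			return v.([]prometheus.Metric)
		}
	}
	result := compute()
	if CacheEnabled() {
		CachePut(key, result)
	}
	return result
}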


@@ -149,13 +149,18 @@ func getPublicValue(public bool) string {
}
func getProjectInfo() *projectOverviewInfo {
if CacheEnabled() {
value, ok := CacheGet(ProjectCollectorName)
if ok {
return value.(*projectOverviewInfo)
}
}
overview := &projectOverviewInfo{}
pc := []projectCount{}
pMap := make(map[int64]*projectInfo)
_, err := dao.GetOrmer().Raw(totalProjectSQL).QueryRows(&pc)
if err != nil {
log.Errorf("get data from DB failure")
}
checkErr(err, "get data from DB failure")
updateProjectBasicInfo(pMap)
updateProjectMemberInfo(pMap)
updateProjectRepoInfo(pMap)
@@ -163,15 +168,16 @@ func getProjectInfo() *projectOverviewInfo {
overview.projectTotals = pc
overview.ProjectMap = pMap
if CacheEnabled() {
CachePut(ProjectCollectorName, overview)
}
return overview
}
func updateProjectBasicInfo(projectMap map[int64]*projectInfo) {
pList := make([]*projectInfo, 0)
_, err := dao.GetOrmer().Raw(projectBasicSQL).QueryRows(&pList)
if err != nil {
log.Errorf("get data from DB failure")
}
checkErr(err, "get project from DB failure")
for _, p := range pList {
projectMap[p.ProjectID] = p
}
@@ -180,9 +186,7 @@ func updateProjectBasicInfo(projectMap map[int64]*projectInfo) {
func updateProjectMemberInfo(projectMap map[int64]*projectInfo) {
pList := make([]projectInfo, 0)
_, err := dao.GetOrmer().Raw(projectMemberSQL).QueryRows(&pList)
if err != nil {
log.Errorf("get data from DB failure")
}
checkErr(err, "get project member data from DB failure")
for _, p := range pList {
if _, ok := projectMap[p.ProjectID]; ok {
projectMap[p.ProjectID].MemberTotal = p.MemberTotal
@@ -190,14 +194,13 @@ func updateProjectMemberInfo(projectMap map[int64]*projectInfo) {
log.Errorf("%v, ID %d", errProjectNotFound, p.ProjectID)
}
}
}
func updateProjectRepoInfo(projectMap map[int64]*projectInfo) {
pList := make([]projectInfo, 0)
_, err := dao.GetOrmer().Raw(projectRepoSQL).QueryRows(&pList)
if err != nil {
log.Errorf("get data from DB failure")
checkErr(err, "get project repo data from DB failure")
}
for _, p := range pList {
if _, ok := projectMap[p.ProjectID]; ok {
@@ -212,9 +215,7 @@ func updateProjectRepoInfo(projectMap map[int64]*projectInfo) {
func updateProjectArtifactInfo(projectMap map[int64]*projectInfo) {
aList := make([]artifactInfo, 0)
_, err := dao.GetOrmer().Raw(projectArtifactsSQL).QueryRows(&aList)
if err != nil {
log.Errorf("get data from DB failure")
}
checkErr(err, "get data from DB failure")
for _, a := range aList {
if _, ok := projectMap[a.ProjectID]; ok {
projectMap[a.ProjectID].Artifact = a
@@ -224,8 +225,10 @@ func updateProjectArtifactInfo(projectMap map[int64]*projectInfo) {
}
}
func checkErr(err error, msg string) {
if err != nil {
log.Errorf("%s: %v", msg, err)
func checkErr(err error, msg string) {
if err == nil {
return
}
log.Errorf("%s: %v", arg, err)
}
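The reworked checkErr above uses an early-return guard clause, so each query site collapses to a single checkErr call with a message specific to that query instead of repeating the if err != nil / log.Errorf block.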


@@ -50,6 +50,12 @@ func (hc *SystemInfoCollector) Collect(c chan<- prometheus.Metric) {
}
func (hc *SystemInfoCollector) getSysInfo() []prometheus.Metric {
if CacheEnabled() {
value, ok := CacheGet(systemInfoCollectorName)
if ok {
return value.([]prometheus.Metric)
}
}
result := []prometheus.Metric{}
res, err := hbrCli.Get(sysInfoURL)
if err != nil {
@@ -65,6 +71,9 @@ func (hc *SystemInfoCollector) getSysInfo() []prometheus.Metric {
sysInfoResponse.ExternalURL,
sysInfoResponse.HarborVersion,
sysInfoResponse.StorageProvider))
if CacheEnabled() {
CachePut(systemInfoCollectorName, result)
}
return result
}