Add test case for exporter

add e2e test to verify exporter and jobservice metrics exist

Signed-off-by: DQ <dengq@vmware.com>
DQ 2021-03-30 08:59:02 +00:00
parent 7eebbeebdf
commit dcb28d8e30
3 changed files with 60 additions and 45 deletions


@@ -14,11 +14,11 @@ var (
 		valueType: prometheus.GaugeValue,
 	}
 	jobServiceTaskQueueLatency = typedDesc{
-		desc:      newDescWithLables("", "task_queue_latency", "the time last taks processed", "type"),
+		desc:      newDescWithLables("", "task_queue_latency", "how long ago the next job to be processed was enqueued", "type"),
 		valueType: prometheus.GaugeValue,
 	}
 	jobServiceConcurrency = typedDesc{
-		desc:      newDescWithLables("", "task_concurrecy", "Total number of concurrency on a pool", "type", "pool"),
+		desc:      newDescWithLables("", "task_concurrency", "Total number of concurrency on a pool", "type", "pool"),
 		valueType: prometheus.GaugeValue,
 	}
 	jobServiceScheduledJobTotal = typedDesc{
@@ -43,10 +43,9 @@ type JobServiceCollector struct {
 
 // Describe implements prometheus.Collector
 func (hc *JobServiceCollector) Describe(c chan<- *prometheus.Desc) {
-	c <- jobServiceTaskQueueSize.Desc()
-	c <- jobServiceTaskQueueLatency.Desc()
-	c <- jobServiceConcurrency.Desc()
-	c <- jobServiceScheduledJobFails.Desc()
+	for _, jd := range hc.getDescribeInfo() {
+		c <- jd
+	}
 }
 
 // Collect implements prometheus.Collector
@@ -56,6 +55,16 @@ func (hc *JobServiceCollector) Collect(c chan<- prometheus.Metric) {
 	}
 }
 
+func (hc *JobServiceCollector) getDescribeInfo() []*prometheus.Desc {
+	return []*prometheus.Desc{
+		jobServiceTaskQueueSize.Desc(),
+		jobServiceTaskQueueLatency.Desc(),
+		jobServiceConcurrency.Desc(),
+		jobServiceScheduledJobTotal.Desc(),
+		jobServiceScheduledJobFails.Desc(),
+	}
+}
+
 func (hc *JobServiceCollector) getJobserviceInfo() []prometheus.Metric {
 	if CacheEnabled() {
 		value, ok := CacheGet(JobServiceCollectorName)
@@ -63,26 +72,9 @@ func (hc *JobServiceCollector) getJobserviceInfo() []prometheus.Metric {
 			return value.([]prometheus.Metric)
 		}
 	}
-	result := []prometheus.Metric{}
 	// Get concurrency info via raw redis client
-	rdsConn := GetRedisPool().Get()
-	values, err := redis.Values(rdsConn.Do("SMEMBERS", redisKeyKnownJobs(jsNamespace)))
-	checkErr(err, "err when get known jobs")
-	var jobs []string
-	for _, v := range values {
-		jobs = append(jobs, string(v.([]byte)))
-	}
-	for _, job := range jobs {
-		values, err := redis.Values(rdsConn.Do("HGETALL", redisKeyJobsLockInfo(jsNamespace, job)))
-		checkErr(err, "err when get job lock info")
-		for i := 0; i < len(values); i += 2 {
-			key, _ := redis.String(values[i], nil)
-			value, _ := redis.Float64(values[i+1], nil)
-			result = append(result, jobServiceConcurrency.MustNewConstMetric(value, job, key))
-		}
-	}
-	rdsConn.Close()
+	result := getConccurrentInfo()
 
 	// get info via jobservice client
 	cli := GetBackendWorker()
@@ -104,3 +96,22 @@ func (hc *JobServiceCollector) getJobserviceInfo() []prometheus.Metric {
 	}
 	return result
 }
+
+func getConccurrentInfo() []prometheus.Metric {
+	rdsConn := GetRedisPool().Get()
+	defer rdsConn.Close()
+	result := []prometheus.Metric{}
+	knownJobvalues, err := redis.Values(rdsConn.Do("SMEMBERS", redisKeyKnownJobs(jsNamespace)))
+	checkErr(err, "err when get known jobs")
+	for _, v := range knownJobvalues {
+		job := string(v.([]byte))
+		lockInfovalues, err := redis.Values(rdsConn.Do("HGETALL", redisKeyJobsLockInfo(jsNamespace, job)))
+		checkErr(err, "err when get job lock info")
+		for i := 0; i < len(lockInfovalues); i += 2 {
+			key, _ := redis.String(lockInfovalues[i], nil)
+			value, _ := redis.Float64(lockInfovalues[i+1], nil)
+			result = append(result, jobServiceConcurrency.MustNewConstMetric(value, job, key))
+		}
+	}
+	return result
+}
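
Note: the new getConccurrentInfo helper only reads two kinds of keys that the jobservice worker pools keep in Redis (the known-jobs set and a per-job lock-info hash). As a rough illustration, the same data can be inspected by hand with redis-py; this is a sketch only, assuming a Redis reachable on localhost:6379, the gocraft/work key layout ("<ns>:known_jobs" and "<ns>:jobs:<job>:lock_info"), and a placeholder namespace that you would replace with the actual jobservice worker pool namespace.

# Sketch only: inspect the data behind harbor_task_concurrency by hand.
# Assumptions: redis-py installed, Redis on localhost:6379, gocraft/work key
# layout; "harbor_job_service_namespace" is a placeholder namespace.
import redis

NS = "harbor_job_service_namespace"
r = redis.Redis(host="localhost", port=6379, decode_responses=True)

# SMEMBERS <ns>:known_jobs, then HGETALL <ns>:jobs:<job>:lock_info per job,
# mirroring the reads performed by getConccurrentInfo above.
for job in sorted(r.smembers(f"{NS}:known_jobs")):
    for pool_id, count in r.hgetall(f"{NS}:jobs:{job}:lock_info").items():
        print(f"job={job} pool={pool_id} concurrency={count}")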


@@ -29,21 +29,6 @@ type RedisPoolConfig struct {
 	IdleTimeoutSecond int
 }
 
-// InitRedisCli ...
-func InitRedisCli(rdsCfg *RedisPoolConfig) {
-	pool, err := redislib.GetRedisPool("JobService", rdsCfg.URL, &redislib.PoolParam{
-		PoolMaxIdle:           6,
-		PoolIdleTimeout:       time.Duration(rdsCfg.IdleTimeoutSecond) * time.Second,
-		DialConnectionTimeout: dialConnectionTimeout,
-		DialReadTimeout:       dialReadTimeout,
-		DialWriteTimeout:      dialWriteTimeout,
-	})
-	if err != nil {
-		panic(err)
-	}
-	redisPool = pool
-}
-
 // InitBackendWorker initiate backend worker
 func InitBackendWorker(redisPoolConfig *RedisPoolConfig) {
 	pool, err := redislib.GetRedisPool("JobService", redisPoolConfig.URL, &redislib.PoolParam{


@@ -10,12 +10,27 @@ class TestMetricsExist(unittest.TestCase):
     golang_basic_metrics = ["go_gc_duration_seconds", "go_goroutines", "go_info", "go_memstats_alloc_bytes"]
 
     metrics = {
-        'core': golang_basic_metrics + ["harbor_core_http_request_total", "harbor_core_http_request_duration_seconds",
-                                        "harbor_core_http_inflight_requests"],
+        'core': golang_basic_metrics + [
+            "harbor_core_http_request_total",
+            "harbor_core_http_request_duration_seconds",
+            "harbor_core_http_inflight_requests"],
         'registry': golang_basic_metrics + ["registry_http_in_flight_requests"],
-        'exporter': golang_basic_metrics + ["artifact_pulled",
-                                            "harbor_project_artifact_total", "harbor_project_member_total", "harbor_project_quota_byte",
-                                            "harbor_project_repo_total", "harbor_project_total", "project_quota_usage_byte"]
+        'exporter': golang_basic_metrics + [
+            "artifact_pulled",
+            "harbor_project_artifact_total",
+            "harbor_project_member_total",
+            "harbor_project_quota_byte",
+            "harbor_project_repo_total",
+            "harbor_project_total",
+            "project_quota_usage_byte",
+            "harbor_task_concurrency",
+            "harbor_task_queue_latency",
+            "harbor_task_queue_size",
+            "harbor_task_scheduled_total"],
+        'jobservice': golang_basic_metrics + [
+            "harbor_jobservice_info",
+            "harbor_jobservice_task_process_time_seconds",
+            "harbor_jobservice_task_total"]
     }
 
     def get_metrics(self):
@@ -23,10 +38,14 @@
         exporter_res = requests.get(metrics_url)
         core_res = requests.get(metrics_url, params={'comp': 'core'})
         reg_res = requests.get(metrics_url, params={'comp': 'registry'})
-        return [('exporter', exporter_res.text), ('core', core_res.text), ('registry', reg_res.text)]
+        js_res = requests.get(metrics_url, params={'comp': 'jobservice'})
+        return [('exporter', exporter_res.text), ('core', core_res.text), ('registry', reg_res.text), ('jobservice', js_res.text)]
 
     def testMetricsExist(self):
         for k, metric_text in self.get_metrics():
             for metric_name in self.metrics[k]:
                 print("Metric {} should exist in {} ".format(metric_name, k))
                 self.assertTrue(metric_name in metric_text)
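
Note: the jobservice check the test adds can also be reproduced by hand outside the e2e suite. A minimal sketch, assuming requests is installed and using a placeholder endpoint (the real test derives metrics_url from its own setup; point the URL at your deployment's metrics endpoint).

# Sketch: manually verify the jobservice metrics the e2e test expects.
# "http://localhost:9090/metrics" is a placeholder URL, not the test's value.
import requests

METRICS_URL = "http://localhost:9090/metrics"
resp = requests.get(METRICS_URL, params={"comp": "jobservice"}, timeout=10)
resp.raise_for_status()

expected = [
    "harbor_jobservice_info",
    "harbor_jobservice_task_process_time_seconds",
    "harbor_jobservice_task_total",
]
missing = [m for m in expected if m not in resp.text]
print("missing:", missing or "none")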