Merge branch 'master' into fix/p2p_provider_health_check

This commit is contained in:
Steven Zou 2020-07-20 16:02:40 +08:00
commit 6b9e0e66c4
159 changed files with 15110 additions and 4802 deletions

View File

@ -116,6 +116,8 @@ jobs:
echo "::set-env name=GOPATH::$(go env GOPATH):$GITHUB_WORKSPACE"
echo "::add-path::$(go env GOPATH)/bin"
echo "::set-env name=TOKEN_PRIVATE_KEY_PATH::${GITHUB_WORKSPACE}/src/github.com/goharbor/harbor/tests/private_key.pem"
IP=`hostname -I | awk '{print $1}'`
echo "::set-env name=IP::$IP"
shell: bash
- name: before_install
run: |
@ -129,20 +131,18 @@ jobs:
curl -L https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > docker-compose
chmod +x docker-compose
sudo mv docker-compose /usr/local/bin
IP=`hostname -I | awk '{print $1}'`
echo '{"insecure-registries" : ["'$IP':5000"]}' | sudo tee /etc/docker/daemon.json
echo "::set-env name=IP::$IP"
sudo cp ./tests/harbor_ca.crt /usr/local/share/ca-certificates/
sudo update-ca-certificates
sudo service docker restart
wget https://get.helm.sh/helm-v2.14.1-linux-386.tar.gz && tar zxvf helm-v2.14.1-linux-386.tar.gz
sudo mv linux-386/helm /usr/local/bin/helm2
helm2 init --client-only
helm2 plugin install https://github.com/chartmuseum/helm-push
helm2 plugin list | grep push || helm2 plugin install https://github.com/chartmuseum/helm-push
wget https://get.helm.sh/helm-v3.1.1-linux-386.tar.gz && tar zxvf helm-v3.1.1-linux-386.tar.gz
sudo mv linux-386/helm /usr/local/bin/helm3
helm3 plugin install https://github.com/chartmuseum/helm-push
mkdir -p $CNAB_PATH && cd $CNAB_PATH && git clone https://github.com/cnabio/cnab-to-oci.git
helm3 plugin list | grep push || helm3 plugin install https://github.com/chartmuseum/helm-push
rm -rf $CNAB_PATH;mkdir -p $CNAB_PATH && cd $CNAB_PATH && git clone https://github.com/cnabio/cnab-to-oci.git
cd cnab-to-oci && git checkout v0.3.0-beta4
go list
make build

View File

@ -98,16 +98,19 @@ PKGVERSIONTAG=dev
PREPARE_VERSION_NAME=versions
#versions
REGISTRYVERSION=v2.7.1-patch-2819-2553
REGISTRYVERSION=v2.7.1-patch-2819-2553-redis
NOTARYVERSION=v0.6.1
CLAIRVERSION=v2.1.4
NOTARYMIGRATEVERSION=v3.5.4
CLAIRADAPTERVERSION=v1.0.2
CLAIRADAPTERVERSION=v1.1.0-rc1
TRIVYVERSION=v0.9.1
TRIVYADAPTERVERSION=v0.12.0
TRIVYADAPTERVERSION=v0.13.0
# version of chartmuseum
CHARTMUSEUMVERSION=v0.12.0
CHARTMUSEUMVERSION=v0.12.0-redis
# version of chartmuseum for pulling the source code
CHARTMUSEUM_SRC_TAG=v0.12.0
# version of registry for pulling the source code
REGISTRY_SRC_TAG=v2.7.1
@ -375,7 +378,7 @@ build:
-e TRIVYVERSION=$(TRIVYVERSION) -e TRIVYADAPTERVERSION=$(TRIVYADAPTERVERSION) \
-e CLAIRVERSION=$(CLAIRVERSION) -e CLAIRADAPTERVERSION=$(CLAIRADAPTERVERSION) -e VERSIONTAG=$(VERSIONTAG) \
-e BUILDBIN=$(BUILDBIN) \
-e CHARTMUSEUMVERSION=$(CHARTMUSEUMVERSION) -e DOCKERIMAGENAME_CHART_SERVER=$(DOCKERIMAGENAME_CHART_SERVER) \
-e CHARTMUSEUMVERSION=$(CHARTMUSEUMVERSION) -e CHARTMUSEUM_SRC_TAG=$(CHARTMUSEUM_SRC_TAG) -e DOCKERIMAGENAME_CHART_SERVER=$(DOCKERIMAGENAME_CHART_SERVER) \
-e NPM_REGISTRY=$(NPM_REGISTRY) -e BASEIMAGETAG=$(BASEIMAGETAG) -e BASEIMAGENAMESPACE=$(BASEIMAGENAMESPACE) \
-e CLAIRURL=$(CLAIRURL) -e CHARTURL=$(CHARTURL) -e NORARYURL=$(NORARYURL) -e REGISTRYURL=$(REGISTRYURL) -e CLAIR_ADAPTER_DOWNLOAD_URL=$(CLAIR_ADAPTER_DOWNLOAD_URL) \
-e TRIVY_DOWNLOAD_URL=$(TRIVY_DOWNLOAD_URL) -e TRIVY_ADAPTER_DOWNLOAD_URL=$(TRIVY_ADAPTER_DOWNLOAD_URL)
@ -383,7 +386,7 @@ build:
build_base_docker:
@for name in chartserver clair clair-adapter trivy-adapter core db jobservice log nginx notary-server notary-signer portal prepare redis registry registryctl; do \
echo $$name ; \
$(DOCKERBUILD) --pull -f $(MAKEFILEPATH_PHOTON)/$$name/Dockerfile.base -t $(BASEIMAGENAMESPACE)/harbor-$$name-base:$(BASEIMAGETAG) --label base-build-date=$(date +"%Y%m%d") . && \
$(DOCKERBUILD) --pull --no-cache -f $(MAKEFILEPATH_PHOTON)/$$name/Dockerfile.base -t $(BASEIMAGENAMESPACE)/harbor-$$name-base:$(BASEIMAGETAG) --label base-build-date=$(date +"%Y%m%d") . && \
$(PUSHSCRIPTPATH)/$(PUSHSCRIPTNAME) $(BASEIMAGENAMESPACE)/harbor-$$name-base:$(BASEIMAGETAG) $(REGISTRYUSER) $(REGISTRYPASSWORD) || exit 1; \
done

View File

@ -228,6 +228,8 @@ paths:
$ref: '#/responses/403'
'404':
$ref: '#/responses/404'
'405':
$ref: '#/responses/405'
'500':
$ref: '#/responses/500'
/projects/{project_name}/repositories/{repository_name}/artifacts/{reference}:
@ -1323,6 +1325,14 @@ responses:
type: string
schema:
$ref: '#/definitions/Errors'
'405':
description: Method not allowed
headers:
X-Request-Id:
description: The ID of the corresponding request for the response
type: string
schema:
$ref: '#/definitions/Errors'
'409':
description: Conflict
headers:

View File

@ -171,9 +171,14 @@ _version: 2.0.0
# Uncomment external_redis if using external Redis server
# external_redis:
# host: redis
# port: 6379
# # support redis, redis+sentinel
# # host for redis: <host_redis>:<port_redis>
# # host for redis+sentinel:
# # <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
# host: redis:6379
# password:
# # sentinel_master_set must be set to support redis+sentinel
# #sentinel_master_set:
# # db_index 0 is for core, it's unchangeable
# registry_db_index: 1
# jobservice_db_index: 2
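To make the two host formats above concrete, here is a minimal Python sketch (assuming the redis-py client; the hosts, master set name, and password are hypothetical) of how a consumer turns each form into a working client:

# Minimal sketch, assuming the redis-py package; all values are hypothetical.
from redis import Redis
from redis.sentinel import Sentinel

def client_from_harbor_host(host, password=None, sentinel_master_set=None, db=0):
    if sentinel_master_set:
        # host is a comma-separated sentinel list: "host1:26379,host2:26379"
        addrs = [(h, int(p)) for h, p in (a.split(':') for a in host.split(','))]
        return Sentinel(addrs).master_for(sentinel_master_set, password=password, db=db)
    # host is a single "host:port" pair for a plain Redis server
    h, p = host.split(':')
    return Redis(host=h, port=int(p), password=password, db=db)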

View File

@ -67,3 +67,50 @@ CREATE TABLE IF NOT EXISTS p2p_preheat_policy (
update_time timestamp,
UNIQUE (name, project_id)
);
ALTER TABLE schedule ADD COLUMN IF NOT EXISTS cron varchar(64);
ALTER TABLE schedule ADD COLUMN IF NOT EXISTS execution_id int;
ALTER TABLE schedule ADD COLUMN IF NOT EXISTS callback_func_name varchar(128);
ALTER TABLE schedule ADD COLUMN IF NOT EXISTS callback_func_param text;
/*extract the cron and callback function parameters from table retention_policy*/
UPDATE schedule
SET cron = retention.cron, callback_func_name = 'RetentionCallback',
callback_func_param=concat('{"PolicyID":', retention.id, ',"Trigger":"Schedule"}')
FROM (
SELECT id, data::json->'trigger'->'references'->>'job_id' AS schedule_id,
data::json->'trigger'->'settings'->>'cron' AS cron
FROM retention_policy
) AS retention
WHERE schedule.id=retention.schedule_id::int;
/*create new execution and task record for each schedule*/
DO $$
DECLARE
sched RECORD;
exec_id integer;
status_code integer;
BEGIN
FOR sched IN SELECT * FROM schedule
LOOP
INSERT INTO execution (vendor_type, trigger) VALUES ('SCHEDULER', 'MANUAL') RETURNING id INTO exec_id;
IF sched.status = 'Pending' THEN
status_code = 0;
ELSIF sched.status = 'Scheduled' THEN
status_code = 1;
ELSIF sched.status = 'Running' THEN
status_code = 2;
ELSIF sched.status = 'Stopped' OR sched.status = 'Error' OR sched.status = 'Success' THEN
status_code = 3;
ELSE
status_code = 0;
END IF;
INSERT INTO task (execution_id, job_id, status, status_code, status_revision, run_count) VALUES (exec_id, sched.job_id, sched.status, status_code, 0, 0);
UPDATE schedule SET execution_id=exec_id WHERE id = sched.id;
END LOOP;
END $$;
ALTER TABLE schedule DROP COLUMN IF EXISTS job_id;
ALTER TABLE schedule DROP COLUMN IF EXISTS status;
ALTER TABLE schedule ADD CONSTRAINT schedule_execution FOREIGN KEY (execution_id) REFERENCES execution(id);
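The DO block above amounts to a fixed translation from the old textual schedule status to the numeric status_code written onto each task record; a Python sketch of the same table, for reference:

# Status -> status_code mapping applied by the migration's DO block above.
STATUS_CODE = {
    'Pending': 0,
    'Scheduled': 1,
    'Running': 2,
    'Stopped': 3,
    'Error': 3,
    'Success': 3,
}

def status_code(status):
    # Any unrecognized status falls back to 0, as in the ELSE branch.
    return STATUS_CODE.get(status, 0)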

View File

@ -192,7 +192,7 @@ _build_chart_server:
rm -rf $(DOCKERFILEPATH_CHART_SERVER)/binary && mkdir -p $(DOCKERFILEPATH_CHART_SERVER)/binary && \
$(call _get_binary, $(CHARTURL), $(DOCKERFILEPATH_CHART_SERVER)/binary/chartm); \
else \
cd $(DOCKERFILEPATH_CHART_SERVER) && $(DOCKERFILEPATH_CHART_SERVER)/builder $(GOBUILDIMAGE) $(CHART_SERVER_CODE_BASE) $(CHARTMUSEUMVERSION) $(CHART_SERVER_MAIN_PATH) $(CHART_SERVER_BIN_NAME) && cd - ; \
cd $(DOCKERFILEPATH_CHART_SERVER) && $(DOCKERFILEPATH_CHART_SERVER)/builder $(GOBUILDIMAGE) $(CHART_SERVER_CODE_BASE) $(CHARTMUSEUM_SRC_TAG) $(CHART_SERVER_MAIN_PATH) $(CHART_SERVER_BIN_NAME) && cd - ; \
fi ; \
echo "building chartmuseum container for photon..." ; \
$(DOCKERBUILD) --build-arg harbor_base_image_version=$(BASEIMAGETAG) --build-arg harbor_base_namespace=$(BASEIMAGENAMESPACE) -f $(DOCKERFILEPATH_CHART_SERVER)/$(DOCKERFILENAME_CHART_SERVER) -t $(DOCKERIMAGENAME_CHART_SERVER):$(VERSIONTAG) . ; \

View File

@ -26,6 +26,7 @@ cur=$PWD
mkdir -p binary
rm -rf binary/$BIN_NAME || true
cp compile.sh binary/
cp *.patch binary/
docker run -it --rm -v $cur/binary:/go/bin --name golang_code_builder $GOLANG_IMAGE /bin/bash /go/bin/compile.sh $GIT_PATH $CODE_VERSION $MAIN_GO_PATH $BIN_NAME

View File

@ -26,6 +26,11 @@ set -e
cd $SRC_PATH
git checkout tags/$VERSION -b $VERSION
#Patch
for p in $(ls /go/bin/*.patch); do
git apply $p || exit 1
done
#Compile
cd $SRC_PATH/$MAIN_GO_PATH && go build -a -o $BIN_NAME
mv $BIN_NAME /go/bin/

View File

@ -0,0 +1,79 @@
diff --git a/cmd/chartmuseum/main.go b/cmd/chartmuseum/main.go
index e2d8ec0..116b1d4 100644
--- a/cmd/chartmuseum/main.go
+++ b/cmd/chartmuseum/main.go
@@ -264,6 +264,8 @@ func storeFromConfig(conf *config.Config) cache.Store {
switch cacheFlag {
case "redis":
store = redisCacheFromConfig(conf)
+ case "redis_sentinel":
+ store = redisSentinelCacheFromConfig(conf)
default:
crash("Unsupported cache store: ", cacheFlag)
}
@@ -280,6 +282,16 @@ func redisCacheFromConfig(conf *config.Config) cache.Store {
))
}
+func redisSentinelCacheFromConfig(conf *config.Config) cache.Store {
+ crashIfConfigMissingVars(conf, []string{"cache.redis.addr", "cache.redis.mastername"})
+ return cache.Store(cache.NewRedisSentinelStore(
+ conf.GetString("cache.redis.mastername"),
+ strings.Split(conf.GetString("cache.redis.addr"), ","),
+ conf.GetString("cache.redis.password"),
+ conf.GetInt("cache.redis.db"),
+ ))
+}
+
func crashIfConfigMissingVars(conf *config.Config, vars []string) {
missing := []string{}
for _, v := range vars {
diff --git a/pkg/cache/redis_sentinel.go b/pkg/cache/redis_sentinel.go
new file mode 100644
index 0000000..0c73427
--- /dev/null
+++ b/pkg/cache/redis_sentinel.go
@@ -0,0 +1,18 @@
+package cache
+
+import (
+ "github.com/go-redis/redis"
+)
+
+// NewRedisSentinelStore creates a new RedisStore backed by Redis Sentinel
+func NewRedisSentinelStore(masterName string, sentinelAddrs []string, password string, db int) *RedisStore {
+ store := &RedisStore{}
+ redisClientOptions := &redis.FailoverOptions{
+ MasterName: masterName,
+ SentinelAddrs: sentinelAddrs,
+ Password: password,
+ DB: db,
+ }
+ store.Client = redis.NewFailoverClient(redisClientOptions)
+ return store
+}
diff --git a/pkg/config/vars.go b/pkg/config/vars.go
index 2b30ec4..603eebc 100644
--- a/pkg/config/vars.go
+++ b/pkg/config/vars.go
@@ -237,10 +237,19 @@ var configVars = map[string]configVar{
Default: "",
CLIFlag: cli.StringFlag{
Name: "cache-redis-addr",
- Usage: "address of Redis service (host:port)",
+ Usage: "address of Redis service (host:port), addresses of Redis+Sentinel service (host1:port1,host2:port2)",
EnvVar: "CACHE_REDIS_ADDR",
},
},
+ "cache.redis.mastername": {
+ Type: stringType,
+ Default: "",
+ CLIFlag: cli.StringFlag{
+ Name: "cache-redis-mastername",
+ Usage: "address of Redis+Sentinel mastername",
+ EnvVar: "CACHE_REDIS_MASTERNAME",
+ },
+ },
"cache.redis.password": {
Type: stringType,
Default: "",

View File

@ -306,9 +306,14 @@ external_database:
{% if external_redis is defined %}
external_redis:
host: {{ external_redis.host }}
port: {{ external_redis.port }}
# support redis, redis+sentinel
# host for redis: <host_redis>:<port_redis>
# host for redis+sentinel:
# <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
host: {{ external_redis.host }}:{{ external_redis.port }}
password: {{ external_redis.password }}
# sentinel_master_set must be set to support redis+sentinel
#sentinel_master_set:
# db_index 0 is for core, it's unchangeable
registry_db_index: {{ external_redis.registry_db_index }}
jobservice_db_index: {{ external_redis.jobservice_db_index }}
@ -319,9 +324,14 @@ external_redis:
{% else %}
# Uncomment external_redis if using external Redis server
# external_redis:
# host: redis
# port: 6379
# # support redis, redis+sentinel
# # host for redis: <host_redis>:<port_redis>
# # host for redis+sentinel:
# # <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
# host: redis:6379
# password:
# # sentinel_master_set must be set to support redis+sentinel
# #sentinel_master_set:
# # db_index 0 is for core, it's unchangeable
# registry_db_index: 1
# jobservice_db_index: 2

View File

@ -11,9 +11,16 @@ PORT=9999
# Only redis is supported now. If redis is set up, the cache is enabled
CACHE={{cache_store}}
{% if cache_redis_mastername %}
CACHE_REDIS_ADDR={{cache_redis_addr}}
CACHE_REDIS_MASTERNAME={{cache_redis_mastername}}
CACHE_REDIS_PASSWORD={{cache_redis_password}}
CACHE_REDIS_DB={{cache_redis_db_index}}
{% else %}
CACHE_REDIS_ADDR={{cache_redis_addr}}
CACHE_REDIS_PASSWORD={{cache_redis_password}}
CACHE_REDIS_DB={{cache_redis_db_index}}
{% endif %}
# Credential for internal communication
BASIC_AUTH_USER=chart_controller
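A quick way to see which branch this template takes is to render the conditional block with jinja2 (a sketch, assuming the jinja2 package; the addresses and master set name are hypothetical):

from jinja2 import Template

# The conditional fragment from the env template above, reduced to its core.
TMPL = """CACHE={{cache_store}}
{% if cache_redis_mastername %}CACHE_REDIS_ADDR={{cache_redis_addr}}
CACHE_REDIS_MASTERNAME={{cache_redis_mastername}}
{% else %}CACHE_REDIS_ADDR={{cache_redis_addr}}
{% endif %}"""

print(Template(TMPL, trim_blocks=True).render(
    cache_store='redis_sentinel',
    cache_redis_addr='host1:26379,host2:26379',
    cache_redis_mastername='mymaster',
))
# CACHE=redis_sentinel
# CACHE_REDIS_ADDR=host1:26379,host2:26379
# CACHE_REDIS_MASTERNAME=mymaster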

View File

@ -1,6 +1,6 @@
CONFIG_PATH=/etc/core/app.conf
UAA_CA_ROOT=/etc/core/certificates/uaa_ca.pem
_REDIS_URL={{redis_host}}:{{redis_port}},100,{{redis_password}},0,{{redis_idle_timeout_seconds}}
_REDIS_URL_CORE={{redis_url_core}}
SYNC_QUOTA=true
CHART_CACHE_DRIVER={{chart_cache_driver}}
_REDIS_URL_REG={{redis_url_reg}}

View File

@ -17,7 +17,13 @@ storage:
disable: true
{% endif %}
redis:
addr: {{redis_host}}:{{redis_port}}
{% if sentinel_master_set %}
# comma-separated sentinel hosts
addr: {{redis_host}}
sentinelMasterSet: {{sentinel_master_set}}
{% else %}
addr: {{redis_host}}
{% endif %}
password: {{redis_password}}
db: {{redis_db_index_reg}}
http:

View File

@ -1,4 +1,5 @@
SCANNER_LOG_LEVEL={{log_level}}
SCANNER_REDIS_URL={{trivy_redis_url}}
SCANNER_STORE_REDIS_URL={{trivy_redis_url}}
SCANNER_STORE_REDIS_NAMESPACE=harbor.scanner.trivy:store
SCANNER_JOB_QUEUE_REDIS_URL={{trivy_redis_url}}

View File

@ -1,6 +1,7 @@
import os, shutil
import os
from urllib.parse import urlsplit
from g import templates_dir, config_dir, data_dir, DEFAULT_UID, DEFAULT_GID
from .jinja import render_jinja
from .misc import prepare_dir
@ -12,12 +13,29 @@ chart_museum_env = os.path.join(config_dir, "chartserver", "env")
chart_museum_data_dir = os.path.join(data_dir, 'chart_storage')
def prepare_chartmuseum(config_dict):
redis_host = config_dict['redis_host']
redis_port = config_dict['redis_port']
redis_password = config_dict['redis_password']
redis_db_index_chart = config_dict['redis_db_index_chart']
def parse_redis(redis_url_chart):
u = urlsplit(redis_url_chart)
if not u.scheme or u.scheme == 'redis':
return {
'cache_store': 'redis',
'cache_redis_addr': u.netloc.split('@')[-1],
'cache_redis_password': u.password or '',
'cache_redis_db_index': u.path and int(u.path[1:]) or 0,
}
elif u.scheme == 'redis+sentinel':
return {
'cache_store': 'redis_sentinel',
'cache_redis_mastername': u.path.split('/')[1],
'cache_redis_addr': u.netloc.split('@')[-1],
'cache_redis_password': u.password or '',
'cache_redis_db_index': len(u.path.split('/')) == 3 and int(u.path.split('/')[2]) or 0,
}
else:
raise Exception('bad redis url for chart:' + redis_url_chart)
def prepare_chartmuseum(config_dict):
storage_provider_name = config_dict['storage_provider_name']
storage_provider_config_map = config_dict['storage_provider_config']
@ -25,10 +43,7 @@ def prepare_chartmuseum(config_dict):
prepare_dir(chart_museum_config_dir)
# process redis info
cache_store = "redis"
cache_redis_password = redis_password
cache_redis_addr = "{}:{}".format(redis_host, redis_port)
cache_redis_db_index = redis_db_index_chart
cache_redis_ops = parse_redis(config_dict['redis_url_chart'])
# process storage info
@ -85,8 +100,10 @@ def prepare_chartmuseum(config_dict):
storage_provider_config_options.append("STORAGE_ALIBABA_BUCKET=%s" % bucket )
storage_provider_config_options.append("STORAGE_ALIBABA_ENDPOINT=%s" % endpoint )
storage_provider_config_options.append("STORAGE_ALIBABA_PREFIX=%s" % ( storage_provider_config_map.get("rootdirectory") or '') )
storage_provider_config_options.append("ALIBABA_CLOUD_ACCESS_KEY_ID=%s" % ( storage_provider_config_map.get("accesskeyid") or '') )
storage_provider_config_options.append("ALIBABA_CLOUD_ACCESS_KEY_SECRET=%s" % ( storage_provider_config_map.get("accesskeysecret") or '') )
storage_provider_config_options.append(
"ALIBABA_CLOUD_ACCESS_KEY_ID=%s" % (storage_provider_config_map.get("accesskeyid") or ''))
storage_provider_config_options.append(
"ALIBABA_CLOUD_ACCESS_KEY_SECRET=%s" % (storage_provider_config_map.get("accesskeysecret") or ''))
else:
# use local file system
storage_provider_config_options.append("STORAGE_LOCAL_ROOTDIR=/chart_storage")
@ -95,15 +112,11 @@ def prepare_chartmuseum(config_dict):
all_storage_provider_configs = ('\n').join(storage_provider_config_options)
render_jinja(
chart_museum_env_temp,
chart_museum_env,
cache_store=cache_store,
cache_redis_addr=cache_redis_addr,
cache_redis_password=cache_redis_password,
cache_redis_db_index=cache_redis_db_index,
core_secret=config_dict['core_secret'],
storage_driver=storage_driver,
all_storage_driver_configs=all_storage_provider_configs,
public_url=config_dict['public_url'],
chart_absolute_url=config_dict['chart_absolute_url'],
internal_tls=config_dict['internal_tls'])
chart_museum_env_temp,
chart_museum_env,
storage_driver=storage_driver,
all_storage_driver_configs=all_storage_provider_configs,
public_url=config_dict['public_url'],
chart_absolute_url=config_dict['chart_absolute_url'],
internal_tls=config_dict['internal_tls'],
**cache_redis_ops)
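As a usage sketch of the parse_redis helper above (the URLs are hypothetical, but follow the shape produced by get_redis_url in utils/configs.py):

# Assumes parse_redis from utils/chart.py above; URLs are hypothetical.
print(parse_redis('redis://anonymous:pw@redis:6379/3'))
# {'cache_store': 'redis', 'cache_redis_addr': 'redis:6379',
#  'cache_redis_password': 'pw', 'cache_redis_db_index': 3}

print(parse_redis('redis+sentinel://anonymous:pw@host1:26379,host2:26379/mymaster/3'))
# {'cache_store': 'redis_sentinel', 'cache_redis_mastername': 'mymaster',
#  'cache_redis_addr': 'host1:26379,host2:26379',
#  'cache_redis_password': 'pw', 'cache_redis_db_index': 3}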

View File

@ -1,9 +1,9 @@
import logging
import os
import yaml
import logging
from models import InternalTLS
from urllib.parse import urlencode
from g import versions_file_path, host_root_dir, DEFAULT_UID, INTERNAL_NO_PROXY_DN
from models import InternalTLS
from utils.misc import generate_random_string, owner_can_read, other_can_read
default_db_max_idle_conns = 2 # NOTE: https://golang.org/pkg/database/sql/#DB.SetMaxIdleConns
@ -73,17 +73,6 @@ def validate(conf: dict, **kwargs):
if uid != DEFAULT_UID and not other_can_read(st_mode):
raise Exception(err_msg)
# Redis validate
redis_host = conf.get("redis_host")
if redis_host is None or len(redis_host) < 1:
raise Exception(
"Error: redis_host in harbor.yml needs to point to an endpoint of Redis server or cluster.")
redis_port = conf.get("redis_port")
if redis_host is None or (redis_port < 1 or redis_port > 65535):
raise Exception(
"Error: redis_port in harbor.yml needs to point to the port of Redis server or cluster.")
# TODO:
# If user enable trust cert dir, need check if the files in this dir is readable.
@ -372,24 +361,38 @@ def parse_yaml_config(config_file_path, with_notary, with_clair, with_trivy, wit
def get_redis_url(db, redis=None):
"""Returns redis url with format `redis://[arbitrary_username:password@]ipaddress:port/database_index`
"""Returns redis url with format `redis://[arbitrary_username:password@]ipaddress:port/database_index?idle_timeout_seconds=30`
>>> get_redis_url(1)
'redis://redis:6379/1'
>>> get_redis_url(1, {'host': 'localhost', 'password': 'password'})
>>> get_redis_url(1, {'host': 'localhost:6379', 'password': 'password'})
'redis://anonymous:password@localhost:6379/1'
>>> get_redis_url(1, {'host':'host1:26379,host2:26379', 'sentinel_master_set':'mymaster', 'password':'password1'})
'redis+sentinel://anonymous:password1@host1:26379,host2:26379/mymaster/1'
>>> get_redis_url(1, {'host':'host1:26379,host2:26379', 'sentinel_master_set':'mymaster', 'password':'password1','idle_timeout_seconds':30})
'redis+sentinel://anonymous:password1@host1:26379,host2:26379/mymaster/1?idle_timeout_seconds=30'
"""
kwargs = {
'host': 'redis',
'port': 6379,
'host': 'redis:6379',
'password': '',
}
kwargs.update(redis or {})
kwargs['db'] = db
kwargs['scheme'] = kwargs.get('sentinel_master_set', None) and 'redis+sentinel' or 'redis'
kwargs['db_part'] = db and ("/%s" % db) or ""
kwargs['sentinel_part'] = kwargs.get('sentinel_master_set', None) and ("/" + kwargs['sentinel_master_set']) or ''
kwargs['password_part'] = kwargs.get('password', None) and ('anonymous:%s@' % kwargs['password']) or ''
if kwargs['password']:
return "redis://anonymous:{password}@{host}:{port}/{db}".format(**kwargs)
return "redis://{host}:{port}/{db}".format(**kwargs)
return "{scheme}://{password_part}{host}{sentinel_part}{db_part}".format(**kwargs) + get_redis_url_param(kwargs)
def get_redis_url_param(redis=None):
params = {}
if redis and 'idle_timeout_seconds' in redis:
params['idle_timeout_seconds'] = redis['idle_timeout_seconds']
if params:
return "?" + urlencode(params)
return ""
def get_redis_configs(external_redis=None, with_clair=True, with_trivy=True):
@ -437,8 +440,7 @@ def get_redis_configs(external_redis=None, with_clair=True, with_trivy=True):
# internal redis config as the default
redis = {
'host': 'redis',
'port': 6379,
'host': 'redis:6379',
'password': '',
'registry_db_index': 1,
'jobservice_db_index': 2,
@ -451,23 +453,15 @@ def get_redis_configs(external_redis=None, with_clair=True, with_trivy=True):
# overwriting existing keys by external_redis
redis.update({key: value for (key, value) in external_redis.items() if value})
configs['redis_host'] = redis['host']
configs['redis_port'] = redis['port']
configs['redis_password'] = redis['password']
configs['redis_db_index_reg'] = redis['registry_db_index']
configs['redis_db_index_js'] = redis['jobservice_db_index']
configs['redis_db_index_chart'] = redis['chartmuseum_db_index']
configs['redis_idle_timeout_seconds'] = redis['idle_timeout_seconds']
configs['redis_url_js'] = get_redis_url(configs['redis_db_index_js'], redis)
configs['redis_url_reg'] = get_redis_url(configs['redis_db_index_reg'], redis)
configs['redis_url_core'] = get_redis_url(0, redis)
configs['redis_url_chart'] = get_redis_url(redis['chartmuseum_db_index'], redis)
configs['redis_url_js'] = get_redis_url(redis['jobservice_db_index'], redis)
configs['redis_url_reg'] = get_redis_url(redis['registry_db_index'], redis)
if with_clair:
configs['redis_db_index_clair'] = redis['clair_db_index']
configs['redis_url_clair'] = get_redis_url(configs['redis_db_index_clair'], redis)
configs['redis_url_clair'] = get_redis_url(redis['clair_db_index'], redis)
if with_trivy:
configs['redis_db_index_trivy'] = redis['trivy_db_index']
configs['trivy_redis_url'] = get_redis_url(configs['redis_db_index_trivy'], redis)
configs['trivy_redis_url'] = get_redis_url(redis['trivy_db_index'], redis)
return configs
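get_redis_configs simply feeds each component's db index through get_redis_url, so a hypothetical sentinel setup yields per-component URLs like the following (values follow the doctests above; note that db 0 for core is falsy, so no trailing /0 is appended):

# Hypothetical sentinel settings; db indexes 1 and 2 are the internal defaults shown above.
redis = {'host': 'host1:26379,host2:26379', 'sentinel_master_set': 'mymaster', 'password': 'pw'}
get_redis_url(0, redis)  # core:       'redis+sentinel://anonymous:pw@host1:26379,host2:26379/mymaster'
get_redis_url(1, redis)  # registry:   'redis+sentinel://anonymous:pw@host1:26379,host2:26379/mymaster/1'
get_redis_url(2, redis)  # jobservice: 'redis+sentinel://anonymous:pw@host1:26379,host2:26379/mymaster/2'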

View File

@ -1,8 +1,8 @@
import shutil, os
import os
import shutil
from g import config_dir, templates_dir, data_dir, DEFAULT_GID, DEFAULT_UID
from utils.misc import prepare_dir, generate_random_string
from utils.jinja import render_jinja
from utils.misc import prepare_dir, generate_random_string
core_config_dir = os.path.join(config_dir, "core", "certificates")
core_env_template_path = os.path.join(templates_dir, "core", "env.jinja")
@ -19,7 +19,7 @@ def prepare_core(config_dict, with_notary, with_clair, with_trivy, with_chartmus
# Render Core
# set cache for chart repo server
# default set 'memory' mode, if redis is configured then set to 'redis'
if len(config_dict['redis_host']) > 0:
if len(config_dict['redis_url_core']) > 0:
chart_cache_driver = "redis"
else:
chart_cache_driver = "memory"

View File

@ -1,10 +1,12 @@
import os, string, sys
import os
import secrets
import string
import sys
from pathlib import Path
from functools import wraps
from g import DEFAULT_UID, DEFAULT_GID, host_root_dir
# To meet security requirement
# By default it will change file mode to 0600, and make the owner of the file to 10000:10000
def mark_file(path, mode=0o600, uid=DEFAULT_UID, gid=DEFAULT_GID):
@ -52,22 +54,6 @@ def validate(conf, **kwargs):
raise Exception(
"Error: no provider configurations are provided for provider %s" % storage_provider_name)
# Redis validate
redis_host = conf.get("configuration", "redis_host")
if redis_host is None or len(redis_host) < 1:
raise Exception(
"Error: redis_host in harbor.yml needs to point to an endpoint of Redis server or cluster.")
redis_port = conf.get("configuration", "redis_port")
if len(redis_port) < 1:
raise Exception(
"Error: redis_port in harbor.yml needs to point to the port of Redis server or cluster.")
redis_db_index = conf.get("configuration", "redis_db_index").strip()
if len(redis_db_index.split(",")) != 3:
raise Exception(
"Error invalid value for redis_db_index: %s. please set it as 1,2,3" % redis_db_index)
def validate_crt_subj(dirty_subj):
subj_list = [item for item in dirty_subj.strip().split("/") \
if len(item.split("=")) == 2 and len(item.split("=")[1]) > 0]

View File

@ -1,9 +1,10 @@
import os, copy, subprocess
import copy
import os
import subprocess
from g import config_dir, templates_dir, DEFAULT_GID, DEFAULT_UID, data_dir
from utils.misc import prepare_dir
from urllib.parse import urlsplit
from utils.jinja import render_jinja
from utils.misc import prepare_dir
registry_config_dir = os.path.join(config_dir, "registry")
registry_config_template_path = os.path.join(templates_dir, "registry", "config.yml.jinja")
@ -26,8 +27,11 @@ def prepare_registry(config_dict):
gen_passwd_file(config_dict)
storage_provider_info = get_storage_provider_info(
config_dict['storage_provider_name'],
config_dict['storage_provider_config'])
config_dict['storage_provider_name'],
config_dict['storage_provider_config'])
# process redis info
redis_ops = parse_redis(config_dict['redis_url_reg'])
render_jinja(
registry_config_template_path,
@ -36,9 +40,27 @@ def prepare_registry(config_dict):
gid=DEFAULT_GID,
level=levels_map[config_dict['log_level']],
storage_provider_info=storage_provider_info,
**config_dict)
**config_dict, **redis_ops)
def parse_redis(redis_url):
u = urlsplit(redis_url)
if not u.scheme or u.scheme == 'redis':
return {
'redis_host': u.netloc.split('@')[-1],
'redis_password': u.password or '',
'redis_db_index_reg': u.path and int(u.path[1:]) or 0,
}
elif u.scheme == 'redis+sentinel':
return {
'sentinel_master_set': u.path.split('/')[1],
'redis_host': u.netloc.split('@')[-1],
'redis_password': u.password or '',
'redis_db_index_reg': len(u.path.split('/')) == 3 and int(u.path.split('/')[2]) or 0,
}
else:
raise Exception('bad redis url for registry:' + redis_url)
def get_storage_provider_info(provider_name, provider_config):
provider_config_copy = copy.deepcopy(provider_config)
if provider_name == "filesystem":
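The registry-side parse_redis above mirrors the chartmuseum variant; a usage sketch (hypothetical URL), showing the keys that feed the registry config.yml template shown earlier:

# Assumes parse_redis from utils/registry.py above; URL is hypothetical.
print(parse_redis('redis+sentinel://anonymous:pw@host1:26379,host2:26379/mymaster/1'))
# {'sentinel_master_set': 'mymaster', 'redis_host': 'host1:26379,host2:26379',
#  'redis_password': 'pw', 'redis_db_index_reg': 1}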

View File

@ -27,6 +27,7 @@ echo 'add patch https://github.com/docker/distribution/pull/2879 ...'
cd $TEMP
wget https://github.com/docker/distribution/pull/2879.patch
git apply 2879.patch
git apply $cur/redis.patch
cd $cur
echo 'build the registry binary ...'

View File

@ -0,0 +1,901 @@
diff --git a/configuration/configuration.go b/configuration/configuration.go
index b347d63b..04cdd230 100644
--- a/configuration/configuration.go
+++ b/configuration/configuration.go
@@ -162,6 +162,9 @@ type Configuration struct {
// Addr specifies the redis instance available to the application.
Addr string `yaml:"addr,omitempty"`
+ // SentinelMasterSet specifies the redis sentinel master set name.
+ SentinelMasterSet string `yaml:"sentinelMasterSet,omitempty"`
+
// Password string to use when making a connection.
Password string `yaml:"password,omitempty"`
diff --git a/registry/handlers/app.go b/registry/handlers/app.go
index 978851bb..a8379071 100644
--- a/registry/handlers/app.go
+++ b/registry/handlers/app.go
@@ -3,6 +3,7 @@ package handlers
import (
"context"
cryptorand "crypto/rand"
+ "errors"
"expvar"
"fmt"
"math/rand"
@@ -15,6 +16,8 @@ import (
"strings"
"time"
+ "github.com/FZambia/sentinel"
+
"github.com/docker/distribution"
"github.com/docker/distribution/configuration"
dcontext "github.com/docker/distribution/context"
@@ -24,7 +27,7 @@ import (
"github.com/docker/distribution/notifications"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/api/errcode"
- "github.com/docker/distribution/registry/api/v2"
+ v2 "github.com/docker/distribution/registry/api/v2"
"github.com/docker/distribution/registry/auth"
registrymiddleware "github.com/docker/distribution/registry/middleware/registry"
repositorymiddleware "github.com/docker/distribution/registry/middleware/repository"
@@ -498,6 +501,44 @@ func (app *App) configureRedis(configuration *configuration.Configuration) {
return
}
+ var getRedisAddr func() (string, error)
+ var testOnBorrow func(c redis.Conn, t time.Time) error
+ if configuration.Redis.SentinelMasterSet != "" {
+ sntnl := &sentinel.Sentinel{
+ Addrs: strings.Split(configuration.Redis.Addr, ","),
+ MasterName: configuration.Redis.SentinelMasterSet,
+ Dial: func(addr string) (redis.Conn, error) {
+ c, err := redis.DialTimeout("tcp", addr,
+ configuration.Redis.DialTimeout,
+ configuration.Redis.ReadTimeout,
+ configuration.Redis.WriteTimeout)
+ if err != nil {
+ return nil, err
+ }
+ return c, nil
+ },
+ }
+ getRedisAddr = func() (string, error) {
+ return sntnl.MasterAddr()
+ }
+ testOnBorrow = func(c redis.Conn, t time.Time) error {
+ if !sentinel.TestRole(c, "master") {
+ return errors.New("role check failed")
+ }
+ return nil
+ }
+
+ } else {
+ getRedisAddr = func() (string, error) {
+ return configuration.Redis.Addr, nil
+ }
+ testOnBorrow = func(c redis.Conn, t time.Time) error {
+ // TODO(stevvooe): We can probably do something more interesting
+ // here with the health package.
+ _, err := c.Do("PING")
+ return err
+ }
+ }
pool := &redis.Pool{
Dial: func() (redis.Conn, error) {
// TODO(stevvooe): Yet another use case for contextual timing.
@@ -513,8 +554,11 @@ func (app *App) configureRedis(configuration *configuration.Configuration) {
}
}
- conn, err := redis.DialTimeout("tcp",
- configuration.Redis.Addr,
+ redisAddr, err := getRedisAddr()
+ if err != nil {
+ return nil, err
+ }
+ conn, err := redis.DialTimeout("tcp", redisAddr,
configuration.Redis.DialTimeout,
configuration.Redis.ReadTimeout,
configuration.Redis.WriteTimeout)
@@ -546,16 +590,11 @@ func (app *App) configureRedis(configuration *configuration.Configuration) {
done(nil)
return conn, nil
},
- MaxIdle: configuration.Redis.Pool.MaxIdle,
- MaxActive: configuration.Redis.Pool.MaxActive,
- IdleTimeout: configuration.Redis.Pool.IdleTimeout,
- TestOnBorrow: func(c redis.Conn, t time.Time) error {
- // TODO(stevvooe): We can probably do something more interesting
- // here with the health package.
- _, err := c.Do("PING")
- return err
- },
- Wait: false, // if a connection is not available, proceed without cache.
+ MaxIdle: configuration.Redis.Pool.MaxIdle,
+ MaxActive: configuration.Redis.Pool.MaxActive,
+ IdleTimeout: configuration.Redis.Pool.IdleTimeout,
+ TestOnBorrow: testOnBorrow,
+ Wait: false, // if a connection is not available, proceed without cache.
}
app.redis = pool
diff --git a/registry/handlers/app_test.go b/registry/handlers/app_test.go
index 12c0b61c..8a644d83 100644
--- a/registry/handlers/app_test.go
+++ b/registry/handlers/app_test.go
@@ -11,7 +11,7 @@ import (
"github.com/docker/distribution/configuration"
"github.com/docker/distribution/context"
"github.com/docker/distribution/registry/api/errcode"
- "github.com/docker/distribution/registry/api/v2"
+ v2 "github.com/docker/distribution/registry/api/v2"
"github.com/docker/distribution/registry/auth"
_ "github.com/docker/distribution/registry/auth/silly"
"github.com/docker/distribution/registry/storage"
@@ -140,7 +140,29 @@ func TestAppDispatcher(t *testing.T) {
// TestNewApp covers the creation of an application via NewApp with a
// configuration.
func TestNewApp(t *testing.T) {
- ctx := context.Background()
+
+ config := configuration.Configuration{
+ Storage: configuration.Storage{
+ "testdriver": nil,
+ "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{
+ "enabled": false,
+ }},
+ },
+ Auth: configuration.Auth{
+ // For now, we simply test that new auth results in a viable
+ // application.
+ "silly": {
+ "realm": "realm-test",
+ "service": "service-test",
+ },
+ },
+ }
+ runAppWithConfig(t, config)
+}
+
+// TestNewAppWithRedis covers the creation of an application via NewApp with a
+// configuration (with redis).
+func TestNewAppWithRedis(t *testing.T) {
config := configuration.Configuration{
Storage: configuration.Storage{
"testdriver": nil,
@@ -157,7 +179,38 @@ func TestNewApp(t *testing.T) {
},
},
}
+ config.Redis.Addr = "127.0.0.1:6379"
+ config.Redis.DB = 0
+ runAppWithConfig(t, config)
+}
+// TestNewAppWithRedisSentinelCluster covers the creation of an application via NewApp with a
+// configuration (with redis sentinel cluster).
+func TestNewAppWithRedisSentinelCluster(t *testing.T) {
+ config := configuration.Configuration{
+ Storage: configuration.Storage{
+ "testdriver": nil,
+ "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{
+ "enabled": false,
+ }},
+ },
+ Auth: configuration.Auth{
+ // For now, we simply test that new auth results in a viable
+ // application.
+ "silly": {
+ "realm": "realm-test",
+ "service": "service-test",
+ },
+ },
+ }
+ config.Redis.Addr = "192.168.0.11:26379,192.168.0.12:26379"
+ config.Redis.DB = 0
+ config.Redis.SentinelMasterSet = "mymaster"
+ runAppWithConfig(t, config)
+}
+
+func runAppWithConfig(t *testing.T, config configuration.Configuration) {
+ ctx := context.Background()
// Mostly, with this test, given a sane configuration, we are simply
// ensuring that NewApp doesn't panic. We might want to tweak this
// behavior.
diff --git a/vendor.conf b/vendor.conf
index a249caf2..fcc9fee2 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -49,3 +49,4 @@ gopkg.in/yaml.v2 v2.2.1
rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git
github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
github.com/opencontainers/image-spec ab7389ef9f50030c9b245bc16b981c7ddf192882
+github.com/FZambia/sentinel 5585739eb4b6478aa30161866ccf9ce0ef5847c7 https://github.com/jeremyxu2010/sentinel.git
diff --git a/vendor/github.com/FZambia/sentinel/LICENSE b/vendor/github.com/FZambia/sentinel/LICENSE
new file mode 100644
index 00000000..8dada3ed
--- /dev/null
+++ b/vendor/github.com/FZambia/sentinel/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/FZambia/sentinel/README.md b/vendor/github.com/FZambia/sentinel/README.md
new file mode 100644
index 00000000..f544c54e
--- /dev/null
+++ b/vendor/github.com/FZambia/sentinel/README.md
@@ -0,0 +1,39 @@
+go-sentinel
+===========
+
+Redis Sentinel support for [redigo](https://github.com/gomodule/redigo) library.
+
+Documentation
+-------------
+
+- [API Reference](http://godoc.org/github.com/FZambia/sentinel)
+
+Alternative solution
+--------------------
+
+You can alternatively configure Haproxy between your application and Redis to proxy requests to Redis master instance if you only need HA:
+
+```
+listen redis
+ server redis-01 127.0.0.1:6380 check port 6380 check inter 2s weight 1 inter 2s downinter 5s rise 10 fall 2
+ server redis-02 127.0.0.1:6381 check port 6381 check inter 2s weight 1 inter 2s downinter 5s rise 10 fall 2 backup
+ bind *:6379
+ mode tcp
+ option tcpka
+ option tcplog
+ option tcp-check
+ tcp-check send PING\r\n
+ tcp-check expect string +PONG
+ tcp-check send info\ replication\r\n
+ tcp-check expect string role:master
+ tcp-check send QUIT\r\n
+ tcp-check expect string +OK
+ balance roundrobin
+```
+
+This way you don't need to use this library.
+
+License
+-------
+
+Library is available under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.html).
diff --git a/vendor/github.com/FZambia/sentinel/sentinel.go b/vendor/github.com/FZambia/sentinel/sentinel.go
new file mode 100644
index 00000000..79209e9f
--- /dev/null
+++ b/vendor/github.com/FZambia/sentinel/sentinel.go
@@ -0,0 +1,426 @@
+package sentinel
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/garyburd/redigo/redis"
+)
+
+// Sentinel provides a way to add high availability (HA) to Redis Pool using
+// preconfigured addresses of Sentinel servers and name of master which Sentinels
+// monitor. It works with Redis >= 2.8.12 (mostly because of ROLE command that
+// was introduced in that version, it's possible though to support old versions
+// using INFO command).
+//
+// Example of the simplest usage to contact master "mymaster":
+//
+// func newSentinelPool() *redis.Pool {
+// sntnl := &sentinel.Sentinel{
+// Addrs: []string{":26379", ":26380", ":26381"},
+// MasterName: "mymaster",
+// Dial: func(addr string) (redis.Conn, error) {
+// timeout := 500 * time.Millisecond
+// c, err := redis.DialTimeout("tcp", addr, timeout, timeout, timeout)
+// if err != nil {
+// return nil, err
+// }
+// return c, nil
+// },
+// }
+// return &redis.Pool{
+// MaxIdle: 3,
+// MaxActive: 64,
+// Wait: true,
+// IdleTimeout: 240 * time.Second,
+// Dial: func() (redis.Conn, error) {
+// masterAddr, err := sntnl.MasterAddr()
+// if err != nil {
+// return nil, err
+// }
+// c, err := redis.Dial("tcp", masterAddr)
+// if err != nil {
+// return nil, err
+// }
+// return c, nil
+// },
+// TestOnBorrow: func(c redis.Conn, t time.Time) error {
+// if !sentinel.TestRole(c, "master") {
+// return errors.New("Role check failed")
+// } else {
+// return nil
+// }
+// },
+// }
+// }
+type Sentinel struct {
+ // Addrs is a slice with known Sentinel addresses.
+ Addrs []string
+
+ // MasterName is a name of Redis master Sentinel servers monitor.
+ MasterName string
+
+ // Dial is a user supplied function to connect to Sentinel on given address. This
+ // address will be chosen from Addrs slice.
+ // Note that as per the redis-sentinel client guidelines, a timeout is mandatory
+ // while connecting to Sentinels, and should not be set to 0.
+ Dial func(addr string) (redis.Conn, error)
+
+ // Pool is a user supplied function returning custom connection pool to Sentinel.
+ // This can be useful to tune options if you are not satisfied with what default
+ // Sentinel pool offers. See defaultPool() method for default pool implementation.
+ // In most cases you only need to provide Dial function and let this be nil.
+ Pool func(addr string) *redis.Pool
+
+ mu sync.RWMutex
+ pools map[string]*redis.Pool
+ addr string
+}
+
+// NoSentinelsAvailable is returned when all sentinels in the list are exhausted
+// (or none configured), and contains the last error returned by Dial (which
+// may be nil)
+type NoSentinelsAvailable struct {
+ lastError error
+}
+
+func (ns NoSentinelsAvailable) Error() string {
+ if ns.lastError != nil {
+ return fmt.Sprintf("redigo: no sentinels available; last error: %s", ns.lastError.Error())
+ }
+ return fmt.Sprintf("redigo: no sentinels available")
+}
+
+// putToTop puts Sentinel address to the top of address list - this means
+// that all next requests will use Sentinel on this address first.
+//
+// From Sentinel guidelines:
+//
+// The first Sentinel replying to the client request should be put at the
+// start of the list, so that at the next reconnection, we'll try first
+// the Sentinel that was reachable in the previous connection attempt,
+// minimizing latency.
+//
+// Lock must be held by caller.
+func (s *Sentinel) putToTop(addr string) {
+ addrs := s.Addrs
+ if addrs[0] == addr {
+ // Already on top.
+ return
+ }
+ newAddrs := []string{addr}
+ for _, a := range addrs {
+ if a == addr {
+ continue
+ }
+ newAddrs = append(newAddrs, a)
+ }
+ s.Addrs = newAddrs
+}
+
+// putToBottom puts Sentinel address to the bottom of address list.
+// We call this method internally when see that some Sentinel failed to answer
+// on application request so next time we start with another one.
+//
+// Lock must be held by caller.
+func (s *Sentinel) putToBottom(addr string) {
+ addrs := s.Addrs
+ if addrs[len(addrs)-1] == addr {
+ // Already on bottom.
+ return
+ }
+ newAddrs := []string{}
+ for _, a := range addrs {
+ if a == addr {
+ continue
+ }
+ newAddrs = append(newAddrs, a)
+ }
+ newAddrs = append(newAddrs, addr)
+ s.Addrs = newAddrs
+}
+
+// defaultPool returns a connection pool to one Sentinel. This allows
+// us to call concurrent requests to Sentinel using connection Do method.
+func (s *Sentinel) defaultPool(addr string) *redis.Pool {
+ return &redis.Pool{
+ MaxIdle: 3,
+ MaxActive: 10,
+ Wait: true,
+ IdleTimeout: 240 * time.Second,
+ Dial: func() (redis.Conn, error) {
+ return s.Dial(addr)
+ },
+ TestOnBorrow: func(c redis.Conn, t time.Time) error {
+ _, err := c.Do("PING")
+ return err
+ },
+ }
+}
+
+func (s *Sentinel) get(addr string) redis.Conn {
+ pool := s.poolForAddr(addr)
+ return pool.Get()
+}
+
+func (s *Sentinel) poolForAddr(addr string) *redis.Pool {
+ s.mu.Lock()
+ if s.pools == nil {
+ s.pools = make(map[string]*redis.Pool)
+ }
+ pool, ok := s.pools[addr]
+ if ok {
+ s.mu.Unlock()
+ return pool
+ }
+ s.mu.Unlock()
+ newPool := s.newPool(addr)
+ s.mu.Lock()
+ p, ok := s.pools[addr]
+ if ok {
+ s.mu.Unlock()
+ return p
+ }
+ s.pools[addr] = newPool
+ s.mu.Unlock()
+ return newPool
+}
+
+func (s *Sentinel) newPool(addr string) *redis.Pool {
+ if s.Pool != nil {
+ return s.Pool(addr)
+ }
+ return s.defaultPool(addr)
+}
+
+// close connection pool to Sentinel.
+// Lock must be held by caller.
+func (s *Sentinel) close() {
+ if s.pools != nil {
+ for _, pool := range s.pools {
+ pool.Close()
+ }
+ }
+ s.pools = nil
+}
+
+func (s *Sentinel) doUntilSuccess(f func(redis.Conn) (interface{}, error)) (interface{}, error) {
+ s.mu.RLock()
+ addrs := s.Addrs
+ s.mu.RUnlock()
+
+ var lastErr error
+
+ for _, addr := range addrs {
+ conn := s.get(addr)
+ reply, err := f(conn)
+ conn.Close()
+ if err != nil {
+ lastErr = err
+ s.mu.Lock()
+ pool, ok := s.pools[addr]
+ if ok {
+ pool.Close()
+ delete(s.pools, addr)
+ }
+ s.putToBottom(addr)
+ s.mu.Unlock()
+ continue
+ }
+ s.putToTop(addr)
+ return reply, nil
+ }
+
+ return nil, NoSentinelsAvailable{lastError: lastErr}
+}
+
+// MasterAddr returns an address of current Redis master instance.
+func (s *Sentinel) MasterAddr() (string, error) {
+ res, err := s.doUntilSuccess(func(c redis.Conn) (interface{}, error) {
+ return queryForMaster(c, s.MasterName)
+ })
+ if err != nil {
+ return "", err
+ }
+ return res.(string), nil
+}
+
+// SlaveAddrs returns a slice with known slave addresses of current master instance.
+func (s *Sentinel) SlaveAddrs() ([]string, error) {
+ res, err := s.doUntilSuccess(func(c redis.Conn) (interface{}, error) {
+ return queryForSlaveAddrs(c, s.MasterName)
+ })
+ if err != nil {
+ return nil, err
+ }
+ return res.([]string), nil
+}
+
+// Slave represents a Redis slave instance which is known by Sentinel.
+type Slave struct {
+ ip string
+ port string
+ flags string
+}
+
+// Addr returns an address of slave.
+func (s *Slave) Addr() string {
+ return net.JoinHostPort(s.ip, s.port)
+}
+
+// Available returns whether the slave is in a working state at the moment, based on information in the slave flags.
+func (s *Slave) Available() bool {
+ return !strings.Contains(s.flags, "disconnected") && !strings.Contains(s.flags, "s_down")
+}
+
+// Slaves returns a slice with known slaves of master instance.
+func (s *Sentinel) Slaves() ([]*Slave, error) {
+ res, err := s.doUntilSuccess(func(c redis.Conn) (interface{}, error) {
+ return queryForSlaves(c, s.MasterName)
+ })
+ if err != nil {
+ return nil, err
+ }
+ return res.([]*Slave), nil
+}
+
+// SentinelAddrs returns a slice of known Sentinel addresses Sentinel server aware of.
+func (s *Sentinel) SentinelAddrs() ([]string, error) {
+ res, err := s.doUntilSuccess(func(c redis.Conn) (interface{}, error) {
+ return queryForSentinels(c, s.MasterName)
+ })
+ if err != nil {
+ return nil, err
+ }
+ return res.([]string), nil
+}
+
+// Discover updates the list of known Sentinel addresses. From docs:
+//
+// A client may update its internal list of Sentinel nodes following this procedure:
+// 1) Obtain a list of other Sentinels for this master using the command SENTINEL sentinels <master-name>.
+// 2) Add every ip:port pair not already existing in our list at the end of the list.
+func (s *Sentinel) Discover() error {
+ addrs, err := s.SentinelAddrs()
+ if err != nil {
+ return err
+ }
+ s.mu.Lock()
+ for _, addr := range addrs {
+ if !stringInSlice(addr, s.Addrs) {
+ s.Addrs = append(s.Addrs, addr)
+ }
+ }
+ s.mu.Unlock()
+ return nil
+}
+
+// Close closes current connection to Sentinel.
+func (s *Sentinel) Close() error {
+ s.mu.Lock()
+ s.close()
+ s.mu.Unlock()
+ return nil
+}
+
+// TestRole wraps GetRole in a test to verify if the role matches an expected
+// role string. If there was any error in querying the supplied connection,
+// the function returns false. Works with Redis >= 2.8.12.
+// It's not goroutine safe, but if you call this method on pooled connections
+// then you are OK.
+func TestRole(c redis.Conn, expectedRole string) bool {
+ role, err := getRole(c)
+ if err != nil || role != expectedRole {
+ return false
+ }
+ return true
+}
+
+// getRole is a convenience function supplied to query an instance (master or
+// slave) for its role. It attempts to use the ROLE command introduced in
+// redis 2.8.12.
+func getRole(c redis.Conn) (string, error) {
+ res, err := c.Do("ROLE")
+ if err != nil {
+ return "", err
+ }
+ rres, ok := res.([]interface{})
+ if ok {
+ return redis.String(rres[0], nil)
+ }
+ return "", errors.New("redigo: can not transform ROLE reply to string")
+}
+
+func queryForMaster(conn redis.Conn, masterName string) (string, error) {
+ res, err := redis.Strings(conn.Do("SENTINEL", "get-master-addr-by-name", masterName))
+ if err != nil {
+ return "", err
+ }
+ if len(res) < 2 {
+ return "", errors.New("redigo: malformed get-master-addr-by-name reply")
+ }
+ masterAddr := net.JoinHostPort(res[0], res[1])
+ return masterAddr, nil
+}
+
+func queryForSlaveAddrs(conn redis.Conn, masterName string) ([]string, error) {
+ slaves, err := queryForSlaves(conn, masterName)
+ if err != nil {
+ return nil, err
+ }
+ slaveAddrs := make([]string, 0)
+ for _, slave := range slaves {
+ slaveAddrs = append(slaveAddrs, slave.Addr())
+ }
+ return slaveAddrs, nil
+}
+
+func queryForSlaves(conn redis.Conn, masterName string) ([]*Slave, error) {
+ res, err := redis.Values(conn.Do("SENTINEL", "slaves", masterName))
+ if err != nil {
+ return nil, err
+ }
+ slaves := make([]*Slave, 0)
+ for _, a := range res {
+ sm, err := redis.StringMap(a, err)
+ if err != nil {
+ return slaves, err
+ }
+ slave := &Slave{
+ ip: sm["ip"],
+ port: sm["port"],
+ flags: sm["flags"],
+ }
+ slaves = append(slaves, slave)
+ }
+ return slaves, nil
+}
+
+func queryForSentinels(conn redis.Conn, masterName string) ([]string, error) {
+ res, err := redis.Values(conn.Do("SENTINEL", "sentinels", masterName))
+ if err != nil {
+ return nil, err
+ }
+ sentinels := make([]string, 0)
+ for _, a := range res {
+ sm, err := redis.StringMap(a, err)
+ if err != nil {
+ return sentinels, err
+ }
+ sentinels = append(sentinels, fmt.Sprintf("%s:%s", sm["ip"], sm["port"]))
+ }
+ return sentinels, nil
+}
+
+func stringInSlice(str string, slice []string) bool {
+ for _, s := range slice {
+ if s == str {
+ return true
+ }
+ }
+ return false
+}

View File

@ -14,13 +14,14 @@ import (
)
const (
standardExpireTime = 3600 * time.Second
redisENVKey = "_REDIS_URL"
cacheDriverENVKey = "CHART_CACHE_DRIVER" // "memory" or "redis"
cacheDriverMem = "memory"
cacheDriverRedis = "redis"
cacheCollectionName = "helm_chart_cache"
maxTry = 10
standardExpireTime = 3600 * time.Second
redisENVKey = "_REDIS_URL_CORE"
cacheDriverENVKey = "CHART_CACHE_DRIVER" // "memory" or "redis"
cacheDriverMem = "memory"
cacheDriverRedis = "redis"
cacheDriverRedisSentinel = "redis_sentinel"
cacheCollectionName = "helm_chart_cache"
maxTry = 10
)
// ChartCache is designed to cache some processed data for repeated accessing
@ -181,6 +182,27 @@ func initCacheDriver(cacheConfig *ChartCacheConfig) beego_cache.Cache {
return nil
}
hlog.Info("Enable redis cache for chart caching")
return redisCache
}
case cacheDriverRedisSentinel:
// New with retry
count := 0
for {
count++
redisCache, err := beego_cache.NewCache(cacheDriverRedisSentinel, cacheConfig.Config)
if err != nil {
// Just logged
hlog.Errorf("Failed to initialize redis cache: %s", err)
if count < maxTry {
<-time.After(time.Duration(backoff(count)) * time.Second)
continue
}
return nil
}
hlog.Info("Enable redis cache for chart caching")
return redisCache
}
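The backoff helper is defined elsewhere in this file (not shown in this hunk); a plausible exponential shape, given only as a sketch, doubles the wait on each attempt and assumes the standard math package:

// backoff returns the number of seconds to wait before retry attempt count
// (1, 2, 4, 8, ... for count = 1, 2, 3, 4, ...).
func backoff(count int) int {
	return int(math.Pow(2, float64(count-1)))
}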

View File

@ -0,0 +1,250 @@
package chartserver
import (
"encoding/json"
"errors"
"fmt"
"strconv"
"strings"
"time"

"github.com/FZambia/sentinel"
"github.com/astaxie/beego/cache"
"github.com/gomodule/redigo/redis"
)
var (
// DefaultKey the collection name of redis for cache adapter.
DefaultKey = "beecacheRedis"
)
// Cache is Redis cache adapter.
type Cache struct {
p *redis.Pool // redis connection pool
conninfo string
dbNum int
key string
password string
maxIdle int
masterName string
}
// NewRedisCache creates a new redis cache with the default collection name.
func NewRedisCache() cache.Cache {
return &Cache{key: DefaultKey}
}
// do executes the redis command; args[0] must be the key name.
func (rc *Cache) do(commandName string, args ...interface{}) (reply interface{}, err error) {
if len(args) < 1 {
return nil, errors.New("missing required arguments")
}
args[0] = rc.associate(args[0])
c := rc.p.Get()
defer c.Close()
return c.Do(commandName, args...)
}
// associate prefixes the origin key with the configured collection key.
func (rc *Cache) associate(originKey interface{}) string {
return fmt.Sprintf("%s:%s", rc.key, originKey)
}
// Get cache from redis.
func (rc *Cache) Get(key string) interface{} {
if v, err := rc.do("GET", key); err == nil {
return v
}
return nil
}
// GetMulti gets multiple cached values from redis.
func (rc *Cache) GetMulti(keys []string) []interface{} {
c := rc.p.Get()
defer c.Close()
var args []interface{}
for _, key := range keys {
args = append(args, rc.associate(key))
}
values, err := redis.Values(c.Do("MGET", args...))
if err != nil {
return nil
}
return values
}
// Put puts a value into the redis cache with the given timeout.
func (rc *Cache) Put(key string, val interface{}, timeout time.Duration) error {
_, err := rc.do("SETEX", key, int64(timeout/time.Second), val)
return err
}
// Delete deletes the cached value in redis.
func (rc *Cache) Delete(key string) error {
_, err := rc.do("DEL", key)
return err
}
// IsExist checks whether the key exists in redis.
func (rc *Cache) IsExist(key string) bool {
v, err := redis.Bool(rc.do("EXISTS", key))
if err != nil {
return false
}
return v
}
// Incr increases the counter in redis.
func (rc *Cache) Incr(key string) error {
_, err := redis.Bool(rc.do("INCRBY", key, 1))
return err
}
// Decr decrease counter in redis.
func (rc *Cache) Decr(key string) error {
_, err := redis.Bool(rc.do("INCRBY", key, -1))
return err
}
// ClearAll cleans all cached entries in redis by deleting the whole collection.
func (rc *Cache) ClearAll() error {
c := rc.p.Get()
defer c.Close()
cachedKeys, err := redis.Strings(c.Do("KEYS", rc.key+":*"))
if err != nil {
return err
}
for _, str := range cachedKeys {
if _, err = c.Do("DEL", str); err != nil {
return err
}
}
return err
}
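ClearAll relies on KEYS, which blocks Redis while it walks the entire keyspace. On large instances an incremental SCAN-based variant avoids that; a sketch, not part of this adapter:

// clearAllScan deletes the collection incrementally with SCAN, avoiding
// the O(N) blocking behaviour of KEYS on large keyspaces.
func (rc *Cache) clearAllScan() error {
	c := rc.p.Get()
	defer c.Close()
	cursor := 0
	for {
		// SCAN returns the next cursor and a batch of matching keys.
		reply, err := redis.Values(c.Do("SCAN", cursor, "MATCH", rc.key+":*", "COUNT", 100))
		if err != nil {
			return err
		}
		if cursor, err = redis.Int(reply[0], nil); err != nil {
			return err
		}
		keys, err := redis.Strings(reply[1], nil)
		if err != nil {
			return err
		}
		for _, k := range keys {
			if _, err := c.Do("DEL", k); err != nil {
				return err
			}
		}
		// A zero cursor means the iteration is complete.
		if cursor == 0 {
			return nil
		}
	}
}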
// StartAndGC starts the redis cache adapter.
// config is like {"key":"collection key","conn":"connection info","dbNum":"0","masterName":"mymaster"}
// Cache items in redis are stored forever, so there is no GC operation.
func (rc *Cache) StartAndGC(config string) error {
var cf map[string]string
if err := json.Unmarshal([]byte(config), &cf); err != nil {
return err
}
if _, ok := cf["key"]; !ok {
cf["key"] = DefaultKey
}
if _, ok := cf["masterName"]; !ok {
return errors.New("config has no masterName")
}
if _, ok := cf["conn"]; !ok {
return errors.New("config has no conn key")
}
// Supported formats: redis://<password>@<host>:<port> and redis_sentinel://<password>@<host1>:<port1>,<host2>:<port2>
cf["conn"] = strings.Replace(cf["conn"], "redis://", "", 1)
cf["conn"] = strings.Replace(cf["conn"], "redis_sentinel://", "", 1)
if i := strings.Index(cf["conn"], "@"); i > -1 {
cf["password"] = cf["conn"][0:i]
cf["conn"] = cf["conn"][i+1:]
}
if _, ok := cf["dbNum"]; !ok {
cf["dbNum"] = "0"
}
if _, ok := cf["password"]; !ok {
cf["password"] = ""
}
if _, ok := cf["maxIdle"]; !ok {
cf["maxIdle"] = "3"
}
rc.key = cf["key"]
rc.masterName = cf["masterName"]
rc.conninfo = cf["conn"]
rc.dbNum, _ = strconv.Atoi(cf["dbNum"])
rc.password = cf["password"]
rc.maxIdle, _ = strconv.Atoi(cf["maxIdle"])
rc.connectInit()
c := rc.p.Get()
defer c.Close()
return c.Err()
}
// connectInit initializes the sentinel-backed connection pool. The plain
// dial-and-select path of the upstream beego adapter is dropped here because
// the pool below always resolves the current master through Sentinel first.
func (rc *Cache) connectInit() {
var sentinelOptions []redis.DialOption
redisOptions := sentinelOptions
if rc.password != "" {
redisOptions = append(redisOptions, redis.DialPassword(rc.password))
}
redisOptions = append(redisOptions, redis.DialDatabase(rc.dbNum))
sntnl := &sentinel.Sentinel{
Addrs: strings.Split(rc.conninfo, ","),
MasterName: rc.masterName,
Dial: func(addr string) (redis.Conn, error) {
fmt.Println("chart dial redis sentinel:", addr)
c, err := redis.Dial("tcp", addr, sentinelOptions...)
if err != nil {
return nil, err
}
return c, nil
},
}
rc.p = &redis.Pool{
Dial: func() (redis.Conn, error) {
masterAddr, err := sntnl.MasterAddr()
if err != nil {
return nil, err
}
fmt.Println("chart dial redis master:", masterAddr, "db:", rc.dbNum)
return redis.Dial("tcp", masterAddr, redisOptions...)
},
TestOnBorrow: func(c redis.Conn, t time.Time) error {
if !sentinel.TestRole(c, "master") {
return errors.New("role check failed")
}
return nil
},
MaxIdle: rc.maxIdle,
IdleTimeout: 180 * time.Second,
}
}
func init() {
cache.Register("redis_sentinel", NewRedisCache)
}
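Once registered, the adapter is selected by name through beego's cache factory. A minimal usage sketch; the hosts, master name, and password below are placeholders:

cfg := `{"key":"helm_chart_cache","conn":"redis1:26379,redis2:26379","dbNum":"0","masterName":"mymaster","password":"Passw0rd"}`
c, err := cache.NewCache("redis_sentinel", cfg)
if err != nil {
	// StartAndGC failed, e.g. missing masterName or unreachable Sentinel
	return err
}
// Values are written to whichever node Sentinel currently reports as master.
_ = c.Put("chart:index", []byte("..."), 3600*time.Second)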

View File

@ -6,6 +6,7 @@ import (
"fmt"
"net/url"
"os"
"strconv"
"strings"
)
@ -35,62 +36,69 @@ func extractError(content []byte) (text string, err error) {
}
// Parse the redis configuration to the beego cache pattern
// Config pattern is "address:port[,weight,password,db_index]"
func parseRedisConfig(redisConfigV string) (string, error) {
// redis://:password@host:6379/1
// redis+sentinel://anonymous:password@host1:26379,host2:26379/mymaster/1
func parseRedisConfig(redisConfigV string) (map[string]string, error) {
if len(redisConfigV) == 0 {
return "", errors.New("empty redis config")
return nil, errors.New("empty redis config")
}
redisConfig := make(map[string]string)
redisConfig["key"] = cacheCollectionName
// Try best to parse the configuration segments.
// If the related parts are missing, assign default value.
// The default database index for UI process is 0.
configSegments := strings.Split(redisConfigV, ",")
for i, segment := range configSegments {
if i > 3 {
// ignore useless segments
break
}
switch i {
// address:port
case 0:
redisConfig["conn"] = segment
// password, may not exist
case 2:
redisConfig["password"] = segment
// database index, may not exist
case 3:
redisConfig["dbNum"] = segment
}
if strings.Index(redisConfigV, "//") < 0 {
redisConfigV = "redis://" + redisConfigV
}
// Assign default value
if len(redisConfig["dbNum"]) == 0 {
redisConfig["dbNum"] = "0"
}
// Try to validate the connection address
fullAddr := redisConfig["conn"]
if strings.Index(fullAddr, "://") == -1 {
// Append schema
fullAddr = fmt.Sprintf("redis://%s", fullAddr)
}
// Validate it by url
_, err := url.Parse(fullAddr)
u, err := url.Parse(redisConfigV)
if err != nil {
return "", err
return nil, fmt.Errorf("bad _REDIS_URL:%s", redisConfigV)
}
if u.Scheme == "redis+sentinel" {
ps := strings.Split(u.Path, "/")
if len(ps) < 2 {
return nil, fmt.Errorf("bad redis sentinel url: no master name, %s", redisConfigV)
}
if _, err := strconv.Atoi(ps[1]); err == nil {
return nil, fmt.Errorf("bad redis sentinel url: master name should not be a number, %s", redisConfigV)
}
redisConfig["conn"] = u.Host
if u.User != nil {
password, isSet := u.User.Password()
if isSet {
redisConfig["password"] = password
}
}
if len(ps) > 2 {
if _, err := strconv.Atoi(ps[2]); err != nil {
return nil, fmt.Errorf("bad redis sentinel url: bad db, %s", redisConfigV)
}
redisConfig["dbNum"] = ps[2]
} else {
redisConfig["dbNum"] = "0"
}
redisConfig["masterName"] = ps[1]
} else if u.Scheme == "redis" {
redisConfig["conn"] = u.Host // host
if u.User != nil {
password, isSet := u.User.Password()
if isSet {
redisConfig["password"] = password
}
}
if len(u.Path) > 1 {
if _, err := strconv.Atoi(u.Path[1:]); err != nil {
return nil, fmt.Errorf("bad redis url: bad db, %s", redisConfigV)
}
redisConfig["dbNum"] = u.Path[1:]
} else {
redisConfig["dbNum"] = "0"
}
} else {
return nil, fmt.Errorf("bad redis scheme, %s", redisConfigV)
}
// Convert config map to string
cfgData, err := json.Marshal(redisConfig)
if err != nil {
return "", err
}
return string(cfgData), nil
return redisConfig, nil
}
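For illustration, a sentinel URL parses into the flat map the cache adapter consumes (this mirrors case 6 of the test below):

m, _ := parseRedisConfig("redis+sentinel://:Passw0rd@redis1:26379,redis2:26379/mymaster/1")
// m["conn"]       == "redis1:26379,redis2:26379"
// m["password"]   == "Passw0rd"
// m["masterName"] == "mymaster"
// m["dbNum"]      == "1"
// m["key"]        == "helm_chart_cache"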
// What's the cache driver if it is set
@ -121,9 +129,18 @@ func getCacheConfig() (*ChartCacheConfig, error) {
if err != nil {
return nil, fmt.Errorf("failed to parse redis configurations from '%s' with error: %s", redisCfg, err)
}
if _, isSet := redisCfg["masterName"]; isSet {
driver = "redis_sentinel"
}
// Convert config map to string
cfgData, err := json.Marshal(redisCfg)
if err != nil {
return nil, fmt.Errorf("failed to parse redis configurations from '%s' with error: %s", redisCfg, err)
}
return &ChartCacheConfig{
DriverType: driver,
Config: redisCfg,
Config: string(cfgData),
}, nil
}

View File

@ -3,7 +3,6 @@ package chartserver
import (
"encoding/json"
"os"
"strings"
"testing"
)
@ -17,28 +16,55 @@ func TestParseRedisConfig(t *testing.T) {
// Case 2: short pattern, addr:port
redisAddr = "redis:6379"
if parsedConnStr, err := parseRedisConfig(redisAddr); err != nil {
t.Fatalf("expect nil error but got non nil one if addr is short pattern: %s\n", parsedConnStr)
if parsedConn, err := parseRedisConfig(redisAddr); err != nil {
t.Fatalf("expect nil error but got non nil one if addr is short pattern: %s\n", parsedConn)
}
// Case 3: long pattern but miss some parts
redisAddr = "redis:6379,100"
if parsedConnStr, err := parseRedisConfig(redisAddr); err != nil {
t.Fatalf("expect nil error but got non nil one if addr is long pattern with some parts missing: %s\n", parsedConnStr)
redisAddr = "redis:6379?idle_timeout_seconds=100"
if parsedConn, err := parseRedisConfig(redisAddr); err != nil {
t.Fatalf("expect nil error but got non nil one if addr is long pattern with some parts missing: %v\n", parsedConn)
} else {
if strings.Index(parsedConnStr, `"dbNum":"0"`) == -1 {
t.Fatalf("expect 'dbNum:0' in the parsed conn str but got nothing: %s\n", parsedConnStr)
if num, ok := parsedConn["dbNum"]; !ok || num != "0" {
t.Fatalf("expect 'dbNum:0' in the parsed conn str: %v\n", parsedConn)
}
}
// Case 4: long pattern
redisAddr = "redis:6379,100,Passw0rd,1"
if parsedConnStr, err := parseRedisConfig(redisAddr); err != nil {
redisAddr = ":Passw0rd@redis:6379/1?idle_timeout_seconds=100"
if parsedConn, err := parseRedisConfig(redisAddr); err != nil {
t.Fatal("expect nil error but got non nil one if addr is long pattern")
} else {
if strings.Index(parsedConnStr, `"dbNum":"1"`) == -1 ||
strings.Index(parsedConnStr, `"password":"Passw0rd"`) == -1 {
t.Fatalf("expect 'dbNum:0' and 'password:Passw0rd' in the parsed conn str but got nothing: %s", parsedConnStr)
if num, ok := parsedConn["dbNum"]; !ok || num != "1" {
t.Fatalf("expect 'dbNum:1' in the parsed conn str: %v", parsedConn)
}
if p, ok := parsedConn["password"]; !ok || p != "Passw0rd" {
t.Fatalf("expect 'password:Passw0rd' in the parsed conn str: %v", parsedConn)
}
}
// Case 5: sentinel but miss master name
redisAddr = "redis+sentinel://:Passw0rd@redis1:26379,redis2:26379/1?idle_timeout_seconds=100"
if _, err := parseRedisConfig(redisAddr); err == nil {
t.Fatal("expect no master name error but got nil")
}
// Case 6: sentinel
redisAddr = "redis+sentinel://:Passw0rd@redis1:26379,redis2:26379/mymaster/1?idle_timeout_seconds=100"
if parsedConn, err := parseRedisConfig(redisAddr); err != nil {
t.Fatal("expect nil error but got non nil one if addr is long pattern")
} else {
if num, ok := parsedConn["dbNum"]; !ok || num != "1" {
t.Fatalf("expect 'dbNum:0' in the parsed conn str: %v", parsedConn)
}
if p, ok := parsedConn["password"]; !ok || p != "Passw0rd" {
t.Fatalf("expect 'password:Passw0rd' in the parsed conn str: %v", parsedConn)
}
if v, ok := parsedConn["masterName"]; !ok || v != "mymaster" {
t.Fatalf("expect 'masterName:mymaster' in the parsed conn str: %v", parsedConn)
}
if v, ok := parsedConn["conn"]; !ok || v != "redis1:26379,redis2:26379" {
t.Fatalf("expect 'conn:redis1:26379,redis2:26379' in the parsed conn str: %v", parsedConn)
}
}
}
@ -73,7 +99,7 @@ func TestGetCacheConfig(t *testing.T) {
}
// case 5: redis cache conf
os.Setenv(redisENVKey, "redis:6379,100,Passw0rd,1")
os.Setenv(redisENVKey, ":Passw0rd@redis:6379/1?idle_timeout_seconds=100")
redisConf, err := getCacheConfig()
if err != nil {
t.Fatalf("expect nil error but got non-nil one when parsing valid redis conf")

View File

@ -140,7 +140,9 @@ var (
{Name: common.OIDCClientSecret, Scope: UserScope, Group: OIDCGroup, ItemType: &PasswordType{}},
{Name: common.OIDCGroupsClaim, Scope: UserScope, Group: OIDCGroup, ItemType: &StringType{}},
{Name: common.OIDCScope, Scope: UserScope, Group: OIDCGroup, ItemType: &StringType{}},
{Name: common.OIDCUserClaim, Scope: UserScope, Group: OIDCGroup, ItemType: &StringType{}},
{Name: common.OIDCVerifyCert, Scope: UserScope, Group: OIDCGroup, DefaultValue: "true", ItemType: &BoolType{}},
{Name: common.OIDCAutoOnboard, Scope: UserScope, Group: OIDCGroup, DefaultValue: "false", ItemType: &BoolType{}},
{Name: common.WithChartMuseum, Scope: SystemScope, Group: BasicGroup, EnvKey: "WITH_CHARTMUSEUM", DefaultValue: "false", ItemType: &BoolType{}, Editable: true},
{Name: common.WithClair, Scope: SystemScope, Group: BasicGroup, EnvKey: "WITH_CLAIR", DefaultValue: "false", ItemType: &BoolType{}, Editable: true},

View File

@ -106,7 +106,9 @@ const (
OIDCClientSecret = "oidc_client_secret"
OIDCVerifyCert = "oidc_verify_cert"
OIDCGroupsClaim = "oidc_groups_claim"
OIDCAutoOnboard = "oidc_auto_onboard"
OIDCScope = "oidc_scope"
OIDCUserClaim = "oidc_user_claim"
CfgDriverDB = "db"
NewHarborAdminName = "admin@harbor.local"

View File

@ -81,11 +81,13 @@ type OIDCSetting struct {
Name string `json:"name"`
Endpoint string `json:"endpoint"`
VerifyCert bool `json:"verify_cert"`
AutoOnboard bool `json:"auto_onboard"`
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
GroupsClaim string `json:"groups_claim"`
RedirectURL string `json:"redirect_url"`
Scope []string `json:"scope"`
UserClaim string `json:"user_claim"`
}
// QuotaSetting wraps the settings for Quota

View File

@ -19,16 +19,17 @@ import (
"crypto/tls"
"errors"
"fmt"
gooidc "github.com/coreos/go-oidc"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/core/config"
"github.com/goharbor/harbor/src/lib/log"
"golang.org/x/oauth2"
"net/http"
"strings"
"sync"
"sync/atomic"
"time"
gooidc "github.com/coreos/go-oidc"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/core/config"
"github.com/goharbor/harbor/src/lib/log"
"golang.org/x/oauth2"
)
const (
@ -294,7 +295,7 @@ func userInfoFromRemote(ctx context.Context, token *Token, setting models.OIDCSe
if err != nil {
return nil, err
}
return userInfoFromClaims(u, setting.GroupsClaim)
return userInfoFromClaims(u, setting.GroupsClaim, setting.UserClaim)
}
func userInfoFromIDToken(ctx context.Context, token *Token, setting models.OIDCSetting) (*UserInfo, error) {
@ -305,14 +306,28 @@ func userInfoFromIDToken(ctx context.Context, token *Token, setting models.OIDCS
if err != nil {
return nil, err
}
return userInfoFromClaims(idt, setting.GroupsClaim)
return userInfoFromClaims(idt, setting.GroupsClaim, setting.UserClaim)
}
func userInfoFromClaims(c claimsProvider, g string) (*UserInfo, error) {
func userInfoFromClaims(c claimsProvider, g, u string) (*UserInfo, error) {
res := &UserInfo{}
if err := c.Claims(res); err != nil {
return nil, err
}
if u != "" {
allClaims := make(map[string]interface{})
if err := c.Claims(&allClaims); err != nil {
return nil, err
}
username, ok := allClaims[u].(string)
if !ok {
return nil, fmt.Errorf("OIDC. Failed to recover Username from claim. Claim '%s' is invalid or not a string", u)
}
res.Username = username
}
res.Groups, res.hasGroupClaim = GroupsFromClaims(c, g)
return res, nil
}

View File

@ -175,6 +175,7 @@ func TestUserInfoFromClaims(t *testing.T) {
s := []struct {
input map[string]interface{}
groupClaim string
userClaim string
expect *UserInfo
}{
{
@ -184,6 +185,7 @@ func TestUserInfoFromClaims(t *testing.T) {
"groups": []interface{}{"g1", "g2"},
},
groupClaim: "grouplist",
userClaim: "",
expect: &UserInfo{
Issuer: "",
Subject: "",
@ -200,6 +202,7 @@ func TestUserInfoFromClaims(t *testing.T) {
"groups": []interface{}{"g1", "g2"},
},
groupClaim: "groups",
userClaim: "",
expect: &UserInfo{
Issuer: "",
Subject: "",
@ -218,6 +221,7 @@ func TestUserInfoFromClaims(t *testing.T) {
"groupclaim": []interface{}{},
},
groupClaim: "groupclaim",
userClaim: "",
expect: &UserInfo{
Issuer: "issuer",
Subject: "subject000",
@ -227,9 +231,26 @@ func TestUserInfoFromClaims(t *testing.T) {
hasGroupClaim: true,
},
},
{
input: map[string]interface{}{
"name": "Alvaro",
"email": "airadier@gmail.com",
"groups": []interface{}{"g1", "g2"},
},
groupClaim: "grouplist",
userClaim: "email",
expect: &UserInfo{
Issuer: "",
Subject: "",
Username: "airadier@gmail.com",
Email: "airadier@gmail.com",
Groups: []string{},
hasGroupClaim: false,
},
},
}
for _, tc := range s {
out, err := userInfoFromClaims(&fakeClaims{tc.input}, tc.groupClaim)
out, err := userInfoFromClaims(&fakeClaims{tc.input}, tc.groupClaim, tc.userClaim)
assert.Nil(t, err)
assert.Equal(t, *tc.expect, *out)
}

View File

@ -1,232 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package redis
import (
"errors"
"fmt"
"os"
"strconv"
"sync"
"time"
"github.com/garyburd/redigo/redis"
"github.com/goharbor/harbor/src/common/utils"
"github.com/goharbor/harbor/src/core/config"
"github.com/goharbor/harbor/src/lib/log"
)
var (
// ErrUnLock ...
ErrUnLock = errors.New("error to release the redis lock")
)
const (
unlockScript = `
if redis.call("get",KEYS[1]) == ARGV[1] then
return redis.call("del",KEYS[1])
else
return 0
end
`
)
// Mutex ...
type Mutex struct {
Conn redis.Conn
key string
value string
opts Options
}
// New ...
func New(conn redis.Conn, key, value string) *Mutex {
o := *DefaultOptions()
if value == "" {
value = utils.GenerateRandomString()
}
return &Mutex{conn, key, value, o}
}
// Require retry to require the lock
func (rm *Mutex) Require() (bool, error) {
var isRequired bool
var err error
for i := 0; i < rm.opts.maxRetry; i++ {
isRequired, err = rm.require()
if isRequired {
break
}
if err != nil || !isRequired {
time.Sleep(rm.opts.retryDelay)
}
}
return isRequired, err
}
// require get the redis lock, for details, just refer to https://redis.io/topics/distlock
func (rm *Mutex) require() (bool, error) {
reply, err := redis.String(rm.Conn.Do("SET", rm.key, rm.value, "NX", "PX", int(rm.opts.expiry/time.Millisecond)))
if err != nil {
return false, err
}
return reply == "OK", nil
}
// Free releases the lock, for details, just refer to https://redis.io/topics/distlock
func (rm *Mutex) Free() (bool, error) {
script := redis.NewScript(1, unlockScript)
resp, err := redis.Int(script.Do(rm.Conn, rm.key, rm.value))
if err != nil {
return false, err
}
if resp == 0 {
return false, ErrUnLock
}
return true, nil
}
// Options ...
type Options struct {
retryDelay time.Duration
expiry time.Duration
maxRetry int
}
var (
opt *Options
optOnce sync.Once
defaultDelay = int64(1) // 1 second
defaultMaxRetry = 600
defaultExpire = int64(2 * time.Hour / time.Second) // 2 hours
)
// DefaultOptions ...
func DefaultOptions() *Options {
optOnce.Do(func() {
retryDelay, err := strconv.ParseInt(os.Getenv("REDIS_LOCK_RETRY_DELAY"), 10, 64)
if err != nil || retryDelay < 0 {
retryDelay = defaultDelay
}
maxRetry, err := strconv.Atoi(os.Getenv("REDIS_LOCK_MAX_RETRY"))
if err != nil || maxRetry < 0 {
maxRetry = defaultMaxRetry
}
expire, err := strconv.ParseInt(os.Getenv("REDIS_LOCK_EXPIRE"), 10, 64)
if err != nil || expire < 0 {
expire = defaultExpire
}
opt = &Options{
retryDelay: time.Duration(retryDelay) * time.Second,
expiry: time.Duration(expire) * time.Second,
maxRetry: maxRetry,
}
})
return opt
}
var (
pool *redis.Pool
poolOnce sync.Once
poolMaxIdle = 200
poolMaxActive = 1000
poolIdleTimeout int64 = 180
)
// DefaultPool return default redis pool
func DefaultPool() *redis.Pool {
poolOnce.Do(func() {
maxIdle, err := strconv.Atoi(os.Getenv("REDIS_POOL_MAX_IDLE"))
if err != nil || maxIdle < 0 {
maxIdle = poolMaxIdle
}
maxActive, err := strconv.Atoi(os.Getenv("REDIS_POOL_MAX_ACTIVE"))
if err != nil || maxActive < 0 {
maxActive = poolMaxActive
}
idleTimeout, err := strconv.ParseInt(os.Getenv("REDIS_POOL_IDLE_TIMEOUT"), 10, 64)
if err != nil || idleTimeout < 0 {
idleTimeout = poolIdleTimeout
}
pool = &redis.Pool{
Dial: func() (redis.Conn, error) {
url := config.GetRedisOfRegURL()
if url == "" {
url = "redis://localhost:6379/1"
}
return redis.DialURL(url)
},
TestOnBorrow: func(c redis.Conn, t time.Time) error {
_, err := c.Do("PING")
return err
},
MaxIdle: maxIdle,
MaxActive: maxActive,
IdleTimeout: time.Duration(idleTimeout) * time.Second,
Wait: true,
}
})
return pool
}
// RequireLock returns lock by key
func RequireLock(key string, conns ...redis.Conn) (*Mutex, error) {
var conn redis.Conn
if len(conns) > 0 {
conn = conns[0]
} else {
conn = DefaultPool().Get()
}
m := New(conn, key, utils.GenerateRandomString())
ok, err := m.Require()
if err != nil {
return nil, fmt.Errorf("require redis lock failed: %v", err)
}
if !ok {
return nil, fmt.Errorf("unable to require lock for %s", key)
}
return m, nil
}
// FreeLock free lock
func FreeLock(m *Mutex) error {
if _, err := m.Free(); err != nil {
log.Warningf("failed to free lock %s, error: %v", m.key, err)
return err
}
if err := m.Conn.Close(); err != nil {
log.Warningf("failed to close the redis con for lock %s, error: %v", m.key, err)
return err
}
return nil
}

View File

@ -1,102 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package redis
import (
"fmt"
"os"
"testing"
"time"
"github.com/garyburd/redigo/redis"
"github.com/goharbor/harbor/src/common/utils"
"github.com/stretchr/testify/assert"
)
const testingRedisHost = "REDIS_HOST"
func init() {
os.Setenv("REDIS_LOCK_MAX_RETRY", "5")
}
func TestRedisLock(t *testing.T) {
con, err := redis.Dial("tcp", fmt.Sprintf("%s:%d", getRedisHost(), 6379))
assert.Nil(t, err)
defer con.Close()
rm := New(con, "test-redis-lock", "test-value")
successLock, err := rm.Require()
assert.Nil(t, err)
assert.True(t, successLock)
time.Sleep(2 * time.Second)
_, err = rm.Require()
assert.NotNil(t, err)
successUnLock, err := rm.Free()
assert.Nil(t, err)
assert.True(t, successUnLock)
}
func TestRequireLock(t *testing.T) {
assert := assert.New(t)
conn, err := redis.Dial("tcp", fmt.Sprintf("%s:%d", getRedisHost(), 6379))
assert.Nil(err)
defer conn.Close()
if l, err := RequireLock(utils.GenerateRandomString(), conn); assert.Nil(err) {
l.Free()
}
if l, err := RequireLock(utils.GenerateRandomString()); assert.Nil(err) {
FreeLock(l)
}
key := utils.GenerateRandomString()
if l, err := RequireLock(key); assert.Nil(err) {
defer FreeLock(l)
_, err = RequireLock(key)
assert.Error(err)
}
}
func TestFreeLock(t *testing.T) {
assert := assert.New(t)
if l, err := RequireLock(utils.GenerateRandomString()); assert.Nil(err) {
assert.Nil(FreeLock(l))
}
conn, err := redis.Dial("tcp", fmt.Sprintf("%s:%d", getRedisHost(), 6379))
assert.Nil(err)
if l, err := RequireLock(utils.GenerateRandomString(), conn); assert.Nil(err) {
conn.Close()
assert.Error(FreeLock(l))
}
}
func getRedisHost() string {
redisHost := os.Getenv(testingRedisHost)
if redisHost == "" {
redisHost = "127.0.0.1" // for local test
}
return redisHost
}

View File

@ -17,14 +17,15 @@ package blob
import (
"context"
"fmt"
"github.com/docker/distribution"
"github.com/garyburd/redigo/redis"
util "github.com/goharbor/harbor/src/common/utils/redis"
"github.com/goharbor/harbor/src/lib/errors"
"github.com/goharbor/harbor/src/lib/log"
"github.com/goharbor/harbor/src/lib/orm"
redislib "github.com/goharbor/harbor/src/lib/redis"
"github.com/goharbor/harbor/src/pkg/blob"
blob_models "github.com/goharbor/harbor/src/pkg/blob/models"
"github.com/gomodule/redigo/redis"
)
var (
@ -78,6 +79,9 @@ type Controller interface {
// Touch updates the blob status to StatusNone and increases the version every time.
Touch(ctx context.Context, blob *blob.Blob) error
// Fail updates the blob status to StatusDeleteFailed and increases the version every time.
Fail(ctx context.Context, blob *blob.Blob) error
// Update updates the blob, it cannot handle blob status transitions.
Update(ctx context.Context, blob *blob.Blob) error
@ -290,7 +294,7 @@ func (c *controller) Sync(ctx context.Context, references []distribution.Descrip
}
func (c *controller) SetAcceptedBlobSize(sessionID string, size int64) error {
conn := util.DefaultPool().Get()
conn := redislib.DefaultPool().Get()
defer conn.Close()
key := fmt.Sprintf("upload:%s:size", sessionID)
@ -307,7 +311,7 @@ func (c *controller) SetAcceptedBlobSize(sessionID string, size int64) error {
}
func (c *controller) GetAcceptedBlobSize(sessionID string) (int64, error) {
conn := util.DefaultPool().Get()
conn := redislib.DefaultPool().Get()
defer conn.Close()
key := fmt.Sprintf("upload:%s:size", sessionID)
@ -335,6 +339,18 @@ func (c *controller) Touch(ctx context.Context, blob *blob.Blob) error {
return nil
}
func (c *controller) Fail(ctx context.Context, blob *blob.Blob) error {
blob.Status = blob_models.StatusDeleteFailed
count, err := c.blobMgr.UpdateBlobStatus(ctx, blob)
if err != nil {
return err
}
if count == 0 {
return errors.New(nil).WithMessage(fmt.Sprintf("no blob item is updated to StatusDeleteFailed, id:%d, digest:%s", blob.ID, blob.Digest)).WithCode(errors.NotFoundCode)
}
return nil
}
func (c *controller) Update(ctx context.Context, blob *blob.Blob) error {
return c.blobMgr.Update(ctx, blob)
}

View File

@ -292,6 +292,40 @@ func (suite *ControllerTestSuite) TestTouch() {
suite.Equal(blob.Status, models.StatusNone)
}
func (suite *ControllerTestSuite) TestFail() {
ctx := suite.Context()
err := Ctl.Fail(ctx, &blob.Blob{
Status: models.StatusNone,
})
suite.NotNil(err)
suite.True(errors.IsNotFoundErr(err))
digest := suite.prepareBlob()
blob, err := Ctl.Get(ctx, digest)
suite.Nil(err)
blob.Status = models.StatusDelete
_, err = pkg_blob.Mgr.UpdateBlobStatus(suite.Context(), blob)
suite.Nil(err)
// StatusDelete cannot be marked as StatusDeleteFailed
err = Ctl.Fail(ctx, blob)
suite.NotNil(err)
suite.True(errors.IsNotFoundErr(err))
blob.Status = models.StatusDeleting
_, err = pkg_blob.Mgr.UpdateBlobStatus(suite.Context(), blob)
suite.Nil(err)
err = Ctl.Fail(ctx, blob)
suite.Nil(err)
blobAfter, err := Ctl.Get(ctx, digest)
suite.Nil(err)
suite.Equal(models.StatusDeleteFailed, blobAfter.Status)
}
func (suite *ControllerTestSuite) TestDelete() {
ctx := suite.Context()

View File

@ -19,15 +19,15 @@ import (
"fmt"
"time"
"github.com/garyburd/redigo/redis"
util "github.com/goharbor/harbor/src/common/utils/redis"
"github.com/goharbor/harbor/src/lib/errors"
"github.com/goharbor/harbor/src/lib/log"
"github.com/goharbor/harbor/src/lib/orm"
"github.com/goharbor/harbor/src/lib/q"
redislib "github.com/goharbor/harbor/src/lib/redis"
"github.com/goharbor/harbor/src/pkg/quota"
"github.com/goharbor/harbor/src/pkg/quota/driver"
"github.com/goharbor/harbor/src/pkg/quota/types"
"github.com/gomodule/redigo/redis"
// quota driver
_ "github.com/goharbor/harbor/src/controller/quota/driver"
@ -127,7 +127,7 @@ func (c *controller) List(ctx context.Context, query *q.Query) ([]*quota.Quota,
}
func (c *controller) getReservedResources(ctx context.Context, reference, referenceID string) (types.ResourceList, error) {
conn := util.DefaultPool().Get()
conn := redislib.DefaultPool().Get()
defer conn.Close()
key := reservedResourcesKey(reference, referenceID)
@ -143,7 +143,7 @@ func (c *controller) getReservedResources(ctx context.Context, reference, refere
}
func (c *controller) setReservedResources(ctx context.Context, reference, referenceID string, resources types.ResourceList) error {
conn := util.DefaultPool().Get()
conn := redislib.DefaultPool().Get()
defer conn.Close()
key := reservedResourcesKey(reference, referenceID)

View File

@ -184,13 +184,11 @@ func Init() error {
// init project manager
initProjectManager()
initRetentionScheduler()
retentionMgr = retention.NewManager()
retentionLauncher = retention.NewLauncher(projectMgr, repository.Mgr, retentionMgr)
retentionController = retention.NewAPIController(retentionMgr, projectMgr, repository.Mgr, retentionScheduler, retentionLauncher)
retentionController = retention.NewAPIController(retentionMgr, projectMgr, repository.Mgr, scheduler.Sched, retentionLauncher)
callbackFun := func(p interface{}) error {
str, ok := p.(string)
@ -204,7 +202,7 @@ func Init() error {
_, err := retentionController.TriggerRetentionExec(param.PolicyID, param.Trigger, false)
return err
}
err := scheduler.Register(retention.SchedulerCallback, callbackFun)
err := scheduler.RegisterCallbackFunc(retention.SchedulerCallback, callbackFun)
return err
}
@ -227,7 +225,3 @@ func initChartController() error {
func initProjectManager() {
projectMgr = project.Mgr
}
func initRetentionScheduler() {
retentionScheduler = scheduler.GlobalScheduler
}

View File

@ -28,7 +28,7 @@ import (
"github.com/goharbor/harbor/src/common/utils"
"github.com/goharbor/harbor/src/core/config"
"github.com/goharbor/harbor/src/lib/log"
"github.com/gomodule/redigo/redis"
"github.com/goharbor/harbor/src/lib/redis"
)
var (
@ -260,22 +260,10 @@ func databaseHealthChecker() health.Checker {
}
func redisHealthChecker() health.Checker {
url := config.GetRedisOfRegURL()
timeout := 60 * time.Second
period := 10 * time.Second
checker := health.CheckFunc(func() error {
conn, err := redis.DialURL(url,
redis.DialConnectTimeout(timeout*time.Second),
redis.DialReadTimeout(timeout*time.Second),
redis.DialWriteTimeout(timeout*time.Second))
if err != nil {
return fmt.Errorf("failed to establish connection with Redis: %v", err)
}
conn := redis.DefaultPool().Get()
defer conn.Close()
_, err = conn.Do("PING")
if err != nil {
return fmt.Errorf("failed to run \"PING\": %v", err)
}
return nil
})
return PeriodicHealthChecker(checker, period)

View File

@ -6,7 +6,6 @@ import (
"net/http"
"strconv"
common_http "github.com/goharbor/harbor/src/common/http"
common_models "github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils"
"github.com/goharbor/harbor/src/core/api/models"
@ -111,21 +110,12 @@ func (t *RegistryAPI) Ping() {
return
}
status, err := registry.CheckHealthStatus(reg)
if err != nil {
e, ok := err.(*common_http.Error)
if ok && e.Code == http.StatusUnauthorized {
t.SendBadRequestError(errors.New("invalid credential"))
return
}
t.SendInternalServerError(fmt.Errorf("failed to check health of registry %s: %v", reg.URL, err))
status := t.getHealthStatus(reg)
if status != model.Healthy {
t.SendBadRequestError(errors.New("the registry is unhealthy"))
return
}
if status != model.Healthy {
t.SendBadRequestError(errors.New(""))
return
}
return
}
@ -226,13 +216,9 @@ func (t *RegistryAPI) Post() {
// Prevent SSRF security issue #3755
r.URL = url.Scheme + "://" + url.Host + url.Path
status, err := registry.CheckHealthStatus(r)
if err != nil {
t.SendBadRequestError(fmt.Errorf("health check to registry %s failed: %v", r.URL, err))
return
}
status := t.getHealthStatus(r)
if status != model.Healthy {
t.SendBadRequestError(fmt.Errorf("registry %s is unhealthy: %s", r.URL, status))
t.SendBadRequestError(errors.New("the registry is unhealthy"))
return
}
@ -247,6 +233,15 @@ func (t *RegistryAPI) Post() {
t.Redirect(http.StatusCreated, strconv.FormatInt(id, 10))
}
func (t *RegistryAPI) getHealthStatus(r *model.Registry) string {
status, err := registry.CheckHealthStatus(r)
if err != nil {
log.Errorf("failed to check the health status of registry %s: %v", r.URL, err)
return model.Unhealthy
}
return string(status)
}
// Put updates a registry
func (t *RegistryAPI) Put() {
id, err := t.GetIDFromURL()
@ -313,13 +308,9 @@ func (t *RegistryAPI) Put() {
}
}
status, err := registry.CheckHealthStatus(r)
if err != nil {
t.SendBadRequestError(fmt.Errorf("health check to registry %s failed: %v", r.URL, err))
return
}
status := t.getHealthStatus(r)
if status != model.Healthy {
t.SendBadRequestError(fmt.Errorf("registry %s is unhealthy: %s", r.URL, status))
t.SendBadRequestError(errors.New("the registry is unhealthy"))
return
}

View File

@ -440,11 +440,13 @@ func OIDCSetting() (*models.OIDCSetting, error) {
Name: cfgMgr.Get(common.OIDCName).GetString(),
Endpoint: cfgMgr.Get(common.OIDCEndpoint).GetString(),
VerifyCert: cfgMgr.Get(common.OIDCVerifyCert).GetBool(),
AutoOnboard: cfgMgr.Get(common.OIDCAutoOnboard).GetBool(),
ClientID: cfgMgr.Get(common.OIDCCLientID).GetString(),
ClientSecret: cfgMgr.Get(common.OIDCClientSecret).GetString(),
GroupsClaim: cfgMgr.Get(common.OIDCGroupsClaim).GetString(),
RedirectURL: extEndpoint + common.OIDCCallbackPath,
Scope: scope,
UserClaim: cfgMgr.Get(common.OIDCUserClaim).GetString(),
}, nil
}

View File

@ -253,8 +253,10 @@ func TestOIDCSetting(t *testing.T) {
common.OIDCName: "test",
common.OIDCEndpoint: "https://oidc.test",
common.OIDCVerifyCert: "true",
common.OIDCAutoOnboard: "false",
common.OIDCScope: "openid, profile",
common.OIDCGroupsClaim: "my_group",
common.OIDCUserClaim: "username",
common.OIDCCLientID: "client",
common.OIDCClientSecret: "secret",
common.ExtEndpoint: "https://harbor.test",
@ -266,8 +268,10 @@ func TestOIDCSetting(t *testing.T) {
assert.Equal(t, "https://oidc.test", v.Endpoint)
assert.True(t, v.VerifyCert)
assert.Equal(t, "my_group", v.GroupsClaim)
assert.False(t, v.AutoOnboard)
assert.Equal(t, "client", v.ClientID)
assert.Equal(t, "secret", v.ClientSecret)
assert.Equal(t, "https://harbor.test/c/oidc/callback", v.RedirectURL)
assert.ElementsMatch(t, []string{"openid", "profile"}, v.Scope)
assert.Equal(t, "username", v.UserClaim)
}

View File

@ -17,10 +17,11 @@ package controllers
import (
"encoding/json"
"fmt"
"github.com/goharbor/harbor/src/common/dao/group"
"net/http"
"strings"
"github.com/goharbor/harbor/src/common/dao/group"
"github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
@ -122,30 +123,99 @@ func (oc *OIDCController) Callback() {
}
oc.SetSession(tokenKey, tokenBytes)
if u == nil {
oc.SetSession(userInfoKey, string(ouDataStr))
oc.Controller.Redirect(fmt.Sprintf("/oidc-onboard?username=%s", strings.Replace(info.Username, " ", "_", -1)),
http.StatusFound)
} else {
gids, err := group.PopulateGroup(models.UserGroupsFromName(info.Groups, common.OIDCGroupType))
if err != nil {
log.Warningf("Failed to populate groups, error: %v, user will have empty group list, username: %s", err, info.Username)
}
u.GroupIDs = gids
oidcUser, err := dao.GetOIDCUserByUserID(u.UserID)
if err != nil {
oc.SendInternalServerError(err)
return
}
_, t, err := secretAndToken(tokenBytes)
oidcUser.Token = t
if err := dao.UpdateOIDCUser(oidcUser); err != nil {
oc.SendInternalServerError(err)
return
}
oc.PopulateUserSession(*u)
oc.Controller.Redirect("/", http.StatusFound)
oidcSettings, err := config.OIDCSetting()
if err != nil {
oc.SendInternalServerError(err)
return
}
if u == nil {
// Recover the username from info.Username by default
username := info.Username
// Replace blanks in the username with underscores
username = strings.Replace(username, " ", "_", -1)
// If automatic onboarding is enabled, skip the onboard page
if oidcSettings.AutoOnboard {
log.Debug("Doing automatic onboarding")
user, onboarded := userOnboard(oc, info, username, tokenBytes)
if !onboarded {
log.Error("User not onboarded")
return
}
log.Debug("User automatically onboarded")
u = user
} else {
oc.SetSession(userInfoKey, string(ouDataStr))
oc.Controller.Redirect(fmt.Sprintf("/oidc-onboard?username=%s", username), http.StatusFound)
// Once redirected, no further action is taken
return
}
}
gids, err := group.PopulateGroup(models.UserGroupsFromName(info.Groups, common.OIDCGroupType))
if err != nil {
log.Warningf("Failed to populate groups, error: %v, user will have empty group list, username: %s", err, info.Username)
}
u.GroupIDs = gids
oidcUser, err := dao.GetOIDCUserByUserID(u.UserID)
if err != nil {
oc.SendInternalServerError(err)
return
}
_, t, err := secretAndToken(tokenBytes)
oidcUser.Token = t
if err := dao.UpdateOIDCUser(oidcUser); err != nil {
oc.SendInternalServerError(err)
return
}
oc.PopulateUserSession(*u)
oc.Controller.Redirect("/", http.StatusFound)
}
func userOnboard(oc *OIDCController, info *oidc.UserInfo, username string, tokenBytes []byte) (*models.User, bool) {
s, t, err := secretAndToken(tokenBytes)
if err != nil {
oc.SendInternalServerError(err)
return nil, false
}
gids, err := group.PopulateGroup(models.UserGroupsFromName(info.Groups, common.OIDCGroupType))
if err != nil {
log.Warningf("Failed to populate group user will have empty group list. username: %s", username)
}
oidcUser := models.OIDCUser{
SubIss: info.Subject + info.Issuer,
Secret: s,
Token: t,
}
user := models.User{
Username: username,
Realname: username,
Email: info.Email,
GroupIDs: gids,
OIDCUserMeta: &oidcUser,
Comment: oidcUserComment,
}
log.Debugf("User created: %+v\n", user)
err = dao.OnBoardOIDCUser(&user)
if err != nil {
if strings.Contains(err.Error(), dao.ErrDupUser.Error()) {
oc.RenderError(http.StatusConflict, "Conflict: a user with the same username or email has already been onboarded.")
return nil, false
}
oc.SendInternalServerError(err)
return nil, false
}
return &user, true
}
// Onboard handles the request to onboard a user authenticated via OIDC provider
@ -176,51 +246,20 @@ func (oc *OIDCController) Onboard() {
oc.SendBadRequestError(errors.New("Failed to get OIDC token from session"))
return
}
s, t, err := secretAndToken(tb)
if err != nil {
oc.SendInternalServerError(err)
return
}
d := &oidc.UserInfo{}
err = json.Unmarshal([]byte(userInfoStr), &d)
err := json.Unmarshal([]byte(userInfoStr), &d)
if err != nil {
oc.SendInternalServerError(err)
return
}
gids, err := group.PopulateGroup(models.UserGroupsFromName(d.Groups, common.OIDCGroupType))
if err != nil {
log.Warningf("Failed to populate group user will have empty group list. username: %s", username)
}
oidcUser := models.OIDCUser{
SubIss: d.Subject + d.Issuer,
Secret: s,
Token: t,
}
email := d.Email
user := models.User{
Username: username,
Realname: d.Username,
Email: email,
GroupIDs: gids,
OIDCUserMeta: &oidcUser,
Comment: oidcUserComment,
}
err = dao.OnBoardOIDCUser(&user)
if err != nil {
if strings.Contains(err.Error(), dao.ErrDupUser.Error()) {
oc.RenderError(http.StatusConflict, "Conflict, the user with same username or email has been onboarded.")
return
}
oc.SendInternalServerError(err)
if user, onboarded := userOnboard(oc, d, username, tb); onboarded {
user.OIDCUserMeta = nil
oc.DelSession(userInfoKey)
return
oc.PopulateUserSession(*user)
}
user.OIDCUserMeta = nil
oc.DelSession(userInfoKey)
oc.PopulateUserSession(user)
}
func secretAndToken(tokenBytes []byte) (string, string, error) {

View File

@ -17,14 +17,17 @@ package main
import (
"encoding/gob"
"fmt"
"net/url"
"os"
"os/signal"
"strconv"
"strings"
"syscall"
"time"
"github.com/astaxie/beego"
_ "github.com/astaxie/beego/session/redis"
_ "github.com/astaxie/beego/session/redis_sentinel"
"github.com/goharbor/harbor/src/common/dao"
common_http "github.com/goharbor/harbor/src/common/http"
"github.com/goharbor/harbor/src/common/models"
@ -45,7 +48,6 @@ import (
_ "github.com/goharbor/harbor/src/pkg/notifier/topic"
"github.com/goharbor/harbor/src/pkg/scan"
"github.com/goharbor/harbor/src/pkg/scan/dao/scanner"
"github.com/goharbor/harbor/src/pkg/scheduler"
"github.com/goharbor/harbor/src/pkg/version"
"github.com/goharbor/harbor/src/replication"
"github.com/goharbor/harbor/src/server"
@ -99,11 +101,61 @@ func main() {
beego.BConfig.WebConfig.Session.SessionOn = true
beego.BConfig.WebConfig.Session.SessionName = config.SessionCookieName
redisURL := os.Getenv("_REDIS_URL")
redisURL := os.Getenv("_REDIS_URL_CORE")
if len(redisURL) > 0 {
u, err := url.Parse(redisURL)
if err != nil {
panic("bad _REDIS_URL:" + redisURL)
}
gob.Register(models.User{})
beego.BConfig.WebConfig.Session.SessionProvider = "redis"
beego.BConfig.WebConfig.Session.SessionProviderConfig = redisURL
if u.Scheme == "redis+sentinel" {
ps := strings.Split(u.Path, "/")
if len(ps) < 2 {
panic("bad redis sentinel url: no master name")
}
ss := make([]string, 5)
ss[0] = strings.Join(strings.Split(u.Host, ","), ";") // host
ss[1] = "100" // pool
if u.User != nil {
password, isSet := u.User.Password()
if isSet {
ss[2] = password
}
}
if len(ps) > 2 {
db, err := strconv.Atoi(ps[2])
if err != nil {
panic("bad redis sentinel url: bad db")
}
if db != 0 {
ss[3] = ps[2]
}
}
ss[4] = ps[1] // monitor name
beego.BConfig.WebConfig.Session.SessionProvider = "redis_sentinel"
beego.BConfig.WebConfig.Session.SessionProviderConfig = strings.Join(ss, ",")
} else {
ss := make([]string, 5)
ss[0] = u.Host // host
ss[1] = "100" // pool
if u.User != nil {
password, isSet := u.User.Password()
if isSet {
ss[2] = password
}
}
if len(u.Path) > 1 {
if _, err := strconv.Atoi(u.Path[1:]); err != nil {
panic("bad redis url: bad db")
}
ss[3] = u.Path[1:]
}
ss[4] = u.Query().Get("idle_timeout_seconds")
beego.BConfig.WebConfig.Session.SessionProvider = "redis"
beego.BConfig.WebConfig.Session.SessionProviderConfig = strings.Join(ss, ",")
}
}
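Concretely, the flattening maps a sentinel URL onto the comma-separated provider config that beego's redis_sentinel session provider expects (fields: semicolon-joined hosts, pool size, password, db, master name); the host names and password here are placeholders:

// _REDIS_URL_CORE = redis+sentinel://:pass@redis1:26379,redis2:26379/mymaster/2
// SessionProvider       = "redis_sentinel"
// SessionProviderConfig = "redis1:26379;redis2:26379,100,pass,2,mymaster"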
beego.AddTemplateExt("htm")
@ -125,9 +177,6 @@ func main() {
log.Fatalf("failed to load config: %v", err)
}
// init the scheduler
scheduler.Init()
password, err := config.InitialAdminPassword()
if err != nil {
log.Fatalf("failed to get admin's initial password: %v", err)

View File

@ -16,13 +16,11 @@ package scheduler
import (
"encoding/json"
"fmt"
"github.com/goharbor/harbor/src/core/service/notifications"
"github.com/goharbor/harbor/src/common/job/models"
"github.com/goharbor/harbor/src/core/service/notifications"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/lib/log"
"github.com/goharbor/harbor/src/pkg/scheduler"
"github.com/goharbor/harbor/src/pkg/scheduler/hook"
)
// Handler handles the scheduler requests
@ -34,46 +32,20 @@ type Handler struct {
func (h *Handler) Handle() {
log.Debugf("received scheduler hook event for schedule %s", h.GetStringFromPath(":id"))
var data models.JobStatusChange
var data job.StatusChange
if err := json.Unmarshal(h.Ctx.Input.CopyBody(1<<32), &data); err != nil {
log.Errorf("failed to decode hook event: %v", err)
return
}
// status update
if len(data.CheckIn) == 0 {
schedulerID, err := h.GetInt64FromPath(":id")
if err != nil {
log.Errorf("failed to get the schedule ID: %v", err)
return
}
if err := hook.GlobalController.UpdateStatus(schedulerID, data.Status); err != nil {
h.SendInternalServerError(fmt.Errorf("failed to update status of job %s: %v", data.JobID, err))
return
}
log.Debugf("handle status update hook event for schedule %s completed", h.GetStringFromPath(":id"))
schedulerID, err := h.GetInt64FromPath(":id")
if err != nil {
log.Errorf("failed to get the schedule ID: %v", err)
return
}
// run callback function
// just log the error message when handling check in request if got any error
params := map[string]interface{}{}
if err := json.Unmarshal([]byte(data.CheckIn), &params); err != nil {
log.Errorf("failed to unmarshal parameters from check in message: %v", err)
if err = scheduler.HandleLegacyHook(h.Ctx.Request.Context(), schedulerID, &data); err != nil {
log.Errorf("failed to handle the legacy hook: %v", err)
return
}
callbackFuncNameParam, exist := params[scheduler.JobParamCallbackFunc]
if !exist {
log.Error("cannot get the parameter \"callback_func_name\" from the check in message")
return
}
callbackFuncName, ok := callbackFuncNameParam.(string)
if !ok || len(callbackFuncName) == 0 {
log.Errorf("invalid \"callback_func_name\": %v", callbackFuncName)
return
}
if err := hook.GlobalController.Run(callbackFuncName, params[scheduler.JobParamCallbackFuncParams]); err != nil {
log.Errorf("failed to run the callback function %s: %v", callbackFuncName, err)
return
}
log.Debugf("callback function %s called for schedule %s", callbackFuncName, h.GetStringFromPath(":id"))
}

View File

@ -6,6 +6,7 @@ require (
github.com/Azure/azure-sdk-for-go v37.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.9.3 // indirect
github.com/Azure/go-autorest/autorest/to v0.3.0 // indirect
github.com/FZambia/sentinel v1.1.0
github.com/Masterminds/semver v1.4.2
github.com/Unknwon/goconfig v0.0.0-20160216183935-5f601ca6ef4d // indirect
github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 // indirect
@ -27,7 +28,6 @@ require (
github.com/docker/go v0.0.0-20160303222718-d30aec9fd63c // indirect
github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82 // indirect
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7
github.com/garyburd/redigo v1.6.0
github.com/ghodss/yaml v1.0.0
github.com/go-openapi/errors v0.19.2
github.com/go-openapi/loads v0.19.4

View File

@ -51,6 +51,8 @@ github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/ClickHouse/clickhouse-go v1.3.12/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI=
github.com/FZambia/sentinel v1.1.0 h1:qrCBfxc8SvJihYNjBWgwUI93ZCvFe/PJIPTHKmlp8a8=
github.com/FZambia/sentinel v1.1.0/go.mod h1:ytL1Am/RLlAoAXG6Kj5LNuw/TRRQrv2rt2FT26vP5gI=
github.com/Knetic/govaluate v3.0.0+incompatible h1:7o6+MAPhYTCF0+fdvoz1xDedhRb4f6s9Tn1Tt7/WTEg=
github.com/Knetic/govaluate v3.0.0+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
@ -253,8 +255,6 @@ github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsouza/fake-gcs-server v1.17.0/go.mod h1:D1rTE4YCyHFNa99oyJJ5HyclvN/0uQR+pM/VdlL83bw=
github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
github.com/garyburd/redigo v1.6.0 h1:0VruCpn7yAIIu7pWVClQC8wxCJEcG3nyzpMSHKi1PQc=
github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@ -325,6 +325,7 @@ github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2K
github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo=
github.com/go-openapi/validate v0.19.5 h1:QhCBKRYqZR+SKo4gl1lPhPahope8/RLt6EVgY8X80w0=
github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
github.com/go-redis/redis v6.14.2+incompatible h1:UE9pLhzmWf+xHNmZsoccjXosPicuiNaInPgym8nzfg0=
github.com/go-redis/redis v6.14.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
@ -444,6 +445,7 @@ github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@ -582,9 +584,11 @@ github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXW
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@ -994,6 +998,7 @@ gopkg.in/dancannon/gorethink.v3 v3.0.5/go.mod h1:GXsi1e3N2OcKhcP6nsYABTiUejbWMFO
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fatih/pool.v2 v2.0.0 h1:xIFeWtxifuQJGk/IEPKsTduEKcKvPmhoiVDGpC40nKg=
gopkg.in/fatih/pool.v2 v2.0.0/go.mod h1:8xVGeu1/2jr2wm5V9SPuMht2H5AEmf5aFMGSQixtjTY=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
gopkg.in/gorethink/gorethink.v3 v3.0.5 h1:e2Uc/Xe+hpcVQFsj6MuHlYog3r0JYpnTzwDj/y2O4MU=
@ -1010,6 +1015,7 @@ gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/square/go-jose.v2 v2.3.0 h1:nLzhkFyl5bkblqYBoiWJUt5JkWOzmiaBtCxdJAqJd3U=
gopkg.in/square/go-jose.v2 v2.3.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

View File

@ -25,7 +25,6 @@ import (
"net"
"net/url"
"os"
"strconv"
"strings"
)
@ -106,40 +105,6 @@ func IsValidURL(address string) bool {
return true
}
// TranslateRedisAddress translates the comma format to redis URL
func TranslateRedisAddress(commaFormat string) (string, bool) {
if IsEmptyStr(commaFormat) {
return "", false
}
sections := strings.Split(commaFormat, ",")
totalSections := len(sections)
if totalSections == 0 {
return "", false
}
urlParts := make([]string, 0)
// section[0] should be host:port
redisURL := fmt.Sprintf("redis://%s", sections[0])
if _, err := url.Parse(redisURL); err != nil {
return "", false
}
urlParts = append(urlParts, "redis://", sections[0])
// Ignore weight
// Check password
if totalSections >= 3 && !IsEmptyStr(sections[2]) {
urlParts = []string{urlParts[0], fmt.Sprintf("%s:%s@", "arbitrary_username", sections[2]), urlParts[1]}
}
if totalSections >= 4 && !IsEmptyStr(sections[3]) {
if _, err := strconv.Atoi(sections[3]); err == nil {
urlParts = append(urlParts, "/", sections[3])
}
}
return strings.Join(urlParts, ""), true
}
// SerializeJob encodes work.Job to json data.
func SerializeJob(job *work.Job) ([]byte, error) {
return json.Marshal(job)

View File

@ -150,13 +150,10 @@ func (c *Configuration) Load(yamlFilePath string, detectEnv bool) error {
redisAddress := c.PoolConfig.RedisPoolCfg.RedisURL
if !utils.IsEmptyStr(redisAddress) {
if _, err := url.Parse(redisAddress); err != nil {
if redisURL, ok := utils.TranslateRedisAddress(redisAddress); ok {
c.PoolConfig.RedisPoolCfg.RedisURL = redisURL
}
} else {
if !strings.HasPrefix(redisAddress, redisSchema) {
c.PoolConfig.RedisPoolCfg.RedisURL = fmt.Sprintf("%s%s", redisSchema, redisAddress)
}
return fmt.Errorf("bad redis url for jobservice, %s", redisAddress)
}
if !strings.Contains(redisAddress, "://") {
c.PoolConfig.RedisPoolCfg.RedisURL = fmt.Sprintf("%s%s", redisSchema, redisAddress)
}
}
}
@ -313,8 +310,7 @@ func (c *Configuration) validate() error {
if utils.IsEmptyStr(c.PoolConfig.RedisPoolCfg.RedisURL) {
return errors.New("URL of redis worker is empty")
}
if !strings.HasPrefix(c.PoolConfig.RedisPoolCfg.RedisURL, redisSchema) {
if !strings.Contains(c.PoolConfig.RedisPoolCfg.RedisURL, "://") {
return errors.New("invalid redis URL")
}

View File

@ -71,7 +71,7 @@ func (suite *ConfigurationTestSuite) TestConfigLoadingWithEnv() {
)
assert.Equal(
suite.T(),
"redis://arbitrary_username:password@8.8.8.8:6379/0",
"redis://:password@8.8.8.8:6379/2",
cfg.PoolConfig.RedisPoolCfg.RedisURL,
"expect redis URL 'localhost' but got '%s'",
cfg.PoolConfig.RedisPoolCfg.RedisURL,
@ -132,7 +132,7 @@ func setENV() error {
err = os.Setenv("JOB_SERVICE_HTTPS_KEY", "../server.key")
err = os.Setenv("JOB_SERVICE_POOL_BACKEND", "redis")
err = os.Setenv("JOB_SERVICE_POOL_WORKERS", "8")
err = os.Setenv("JOB_SERVICE_POOL_REDIS_URL", "8.8.8.8:6379,100,password,0")
err = os.Setenv("JOB_SERVICE_POOL_REDIS_URL", "redis://:password@8.8.8.8:6379/2")
err = os.Setenv("JOB_SERVICE_POOL_REDIS_NAMESPACE", "ut_namespace")
err = os.Setenv("JOBSERVICE_SECRET", "js_secret")
err = os.Setenv("CORE_SECRET", "core_secret")

View File

@ -18,7 +18,7 @@ worker_pool:
#Additional config if use 'redis' backend
redis_pool:
#redis://[arbitrary_username:password@]ipaddress:port/database_index
#or ipaddress:port[,weight,password,database_index]
#or ipaddress:port[|weight|password|database_index]
redis_url: "localhost:6379"
namespace: "testing_job_service_v2"
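#Example values (illustrative only, not shipped defaults):
#redis_url: "redis://:mypassword@192.168.0.2:6379/2"
#redis_url: "redis+sentinel://:mypassword@10.0.0.1:26379,10.0.0.2:26379/mymaster/2"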

View File

@ -135,7 +135,7 @@ func (bc *basicController) GetJobLogData(jobID string) ([]byte, error) {
logData, err := logger.Retrieve(jobID)
if err != nil {
return nil, err
return nil, errors.Wrapf(err, "failed to get the log of job %s", jobID)
}
return logData, nil

View File

@ -15,24 +15,24 @@
package gc
import (
"github.com/goharbor/harbor/src/lib/errors"
"github.com/goharbor/harbor/src/pkg/artifactrash/model"
blob_models "github.com/goharbor/harbor/src/pkg/blob/models"
"os"
"strconv"
"time"
"github.com/goharbor/harbor/src/lib/errors"
redislib "github.com/goharbor/harbor/src/lib/redis"
"github.com/goharbor/harbor/src/pkg/artifactrash/model"
blob_models "github.com/goharbor/harbor/src/pkg/blob/models"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/registryctl"
"github.com/goharbor/harbor/src/controller/artifact"
"github.com/goharbor/harbor/src/controller/project"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/jobservice/logger"
"github.com/goharbor/harbor/src/lib/q"
"github.com/goharbor/harbor/src/pkg/artifactrash"
"github.com/goharbor/harbor/src/pkg/blob"
"github.com/garyburd/redigo/redis"
"github.com/goharbor/harbor/src/common/registryctl"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/jobservice/logger"
"github.com/goharbor/harbor/src/registryctl/client"
)
@ -302,17 +302,19 @@ func (gc *GarbageCollector) sweep(ctx job.Context) error {
// cleanCache cleans the registry cache for GC.
// This is needed because of the issue https://github.com/docker/distribution/issues/2094
func (gc *GarbageCollector) cleanCache() error {
con, err := redis.DialURL(
gc.redisURL,
redis.DialConnectTimeout(dialConnectionTimeout),
redis.DialReadTimeout(dialReadTimeout),
redis.DialWriteTimeout(dialWriteTimeout),
)
pool, err := redislib.GetRedisPool("GarbageCollector", gc.redisURL, &redislib.PoolParam{
PoolMaxIdle: 0,
PoolMaxActive: 1,
PoolIdleTimeout: 60 * time.Second,
DialConnectionTimeout: dialConnectionTimeout,
DialReadTimeout: dialReadTimeout,
DialWriteTimeout: dialWriteTimeout,
})
if err != nil {
gc.logger.Errorf("failed to connect to redis %v", err)
return err
}
con := pool.Get()
defer con.Close()
// clean all keys in registry redis DB.

View File

@ -2,9 +2,9 @@ package gc
import (
"fmt"
"github.com/garyburd/redigo/redis"
"github.com/goharbor/harbor/src/lib/errors"
"github.com/goharbor/harbor/src/pkg/registry"
"github.com/gomodule/redigo/redis"
)
// delKeys ...

View File

@ -17,14 +17,13 @@ package runtime
import (
"context"
"fmt"
redislib "github.com/goharbor/harbor/src/lib/redis"
"os"
"os/signal"
"sync"
"syscall"
"time"
"github.com/goharbor/harbor/src/pkg/p2p/preheat"
"github.com/goharbor/harbor/src/jobservice/api"
"github.com/goharbor/harbor/src/jobservice/common/utils"
"github.com/goharbor/harbor/src/jobservice/config"
@ -44,6 +43,7 @@ import (
"github.com/goharbor/harbor/src/jobservice/worker"
"github.com/goharbor/harbor/src/jobservice/worker/cworker"
"github.com/goharbor/harbor/src/lib/errors"
"github.com/goharbor/harbor/src/pkg/p2p/preheat"
"github.com/goharbor/harbor/src/pkg/retention"
sc "github.com/goharbor/harbor/src/pkg/scan"
"github.com/goharbor/harbor/src/pkg/scan/all"
@ -53,8 +53,7 @@ import (
const (
dialConnectionTimeout = 30 * time.Second
healthCheckPeriod = time.Minute
dialReadTimeout = healthCheckPeriod + 10*time.Second
dialReadTimeout = 10 * time.Second
dialWriteTimeout = 10 * time.Second
)
@ -279,25 +278,15 @@ func (bs *Bootstrap) loadAndRunRedisWorkerPool(
// Get a redis connection pool
func (bs *Bootstrap) getRedisPool(redisPoolConfig *config.RedisPoolConfig) *redis.Pool {
return &redis.Pool{
MaxIdle: 6,
Wait: true,
IdleTimeout: time.Duration(redisPoolConfig.IdleTimeoutSecond) * time.Second,
Dial: func() (redis.Conn, error) {
return redis.DialURL(
redisPoolConfig.RedisURL,
redis.DialConnectTimeout(dialConnectionTimeout),
redis.DialReadTimeout(dialReadTimeout),
redis.DialWriteTimeout(dialWriteTimeout),
)
},
TestOnBorrow: func(c redis.Conn, t time.Time) error {
if time.Since(t) < time.Minute {
return nil
}
_, err := c.Do("PING")
return err
},
if pool, err := redislib.GetRedisPool("JobService", redisPoolConfig.RedisURL, &redislib.PoolParam{
PoolMaxIdle: 6,
PoolIdleTimeout: time.Duration(redisPoolConfig.IdleTimeoutSecond) * time.Second,
DialConnectionTimeout: dialConnectionTimeout,
DialReadTimeout: dialReadTimeout,
DialWriteTimeout: dialWriteTimeout,
}); err != nil {
panic(err)
} else {
return pool
}
}

View File

@ -18,6 +18,7 @@ package tests
import (
"errors"
"fmt"
redislib "github.com/goharbor/harbor/src/lib/redis"
"os"
"time"
@ -36,22 +37,14 @@ const (
// GiveMeRedisPool ...
func GiveMeRedisPool() *redis.Pool {
redisHost := getRedisHost()
redisPool := &redis.Pool{
MaxActive: 6,
MaxIdle: 6,
Wait: true,
Dial: func() (redis.Conn, error) {
return redis.Dial(
"tcp",
fmt.Sprintf("%s:%d", redisHost, 6379),
redis.DialConnectTimeout(dialConnectionTimeout),
redis.DialReadTimeout(dialReadTimeout),
redis.DialWriteTimeout(dialWriteTimeout),
)
},
}
return redisPool
pool, _ := redislib.GetRedisPool("test", fmt.Sprintf("redis://%s:%d", redisHost, 6379), &redislib.PoolParam{
PoolMaxIdle: 6,
PoolMaxActive: 6,
DialConnectionTimeout: dialConnectionTimeout,
DialReadTimeout: dialReadTimeout,
DialWriteTimeout: dialWriteTimeout,
})
return pool
}
// GiveMeTestNamespace ...

View File

@ -42,7 +42,7 @@ func WrapConflictError(err error, format string, args ...interface{}) error {
// as a src/internal/error.Error with not found error code, else return nil
func AsNotFoundError(err error, messageFormat string, args ...interface{}) *errors.Error {
if errors.Is(err, orm.ErrNoRows) {
e := errors.NotFoundError(err)
e := errors.NotFoundError(nil)
if len(messageFormat) > 0 {
e.WithMessage(messageFormat, args...)
}

72
src/lib/redis/helper.go Normal file
View File

@ -0,0 +1,72 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package redis
import (
"os"
"strconv"
"sync"
"time"
"github.com/goharbor/harbor/src/core/config"
"github.com/gomodule/redigo/redis"
)
var (
pool *redis.Pool
poolOnce sync.Once
poolMaxIdle = 200
poolMaxActive = 1000
poolIdleTimeout int64 = 180
dialConnectionTimeout = 30 * time.Second
dialReadTimeout = 10 * time.Second
dialWriteTimeout = 10 * time.Second
)
// DefaultPool returns the default redis pool
func DefaultPool() *redis.Pool {
poolOnce.Do(func() {
maxIdle, err := strconv.Atoi(os.Getenv("REDIS_POOL_MAX_IDLE"))
if err != nil || maxIdle < 0 {
maxIdle = poolMaxIdle
}
maxActive, err := strconv.Atoi(os.Getenv("REDIS_POOL_MAX_ACTIVE"))
if err != nil || maxActive < 0 {
maxActive = poolMaxActive
}
idleTimeout, err := strconv.ParseInt(os.Getenv("REDIS_POOL_IDLE_TIMEOUT"), 10, 64)
if err != nil || idleTimeout < 0 {
idleTimeout = poolIdleTimeout
}
url := config.GetRedisOfRegURL()
if url == "" {
url = "redis://localhost:6379/1"
}
pool, err = GetRedisPool("CommonRedis", url, &PoolParam{
PoolMaxIdle: maxIdle,
PoolMaxActive: maxActive,
PoolIdleTimeout: time.Duration(idleTimeout) * time.Second,
DialConnectionTimeout: dialConnectionTimeout,
DialReadTimeout: dialReadTimeout,
DialWriteTimeout: dialWriteTimeout,
})
})
return pool
}
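// A hedged usage sketch (not part of this change; the import alias "redislib" is
// an assumption): callers take a connection from the shared pool and must close
// it to return it.
//
//	import redislib "github.com/goharbor/harbor/src/lib/redis"
//
//	func ping() error {
//	    conn := redislib.DefaultPool().Get()
//	    defer conn.Close() // returns the connection to the pool
//	    _, err := conn.Do("PING")
//	    return err
//	}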

View File

@ -12,29 +12,29 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package model
package redis
import (
"time"
"github.com/astaxie/beego/orm"
"fmt"
"github.com/stretchr/testify/require"
"os"
"testing"
)
func init() {
orm.RegisterModel(
new(Schedule))
const testingRedisHost = "REDIS_HOST"
func TestGetRedisPool(t *testing.T) {
pool, err := GetRedisPool("test", fmt.Sprintf("redis://%s:%d", getRedisHost(), 6379), nil)
require.Nil(t, err)
conn := pool.Get()
defer conn.Close()
}
// Schedule is a record for a scheduler job
type Schedule struct {
ID int64 `orm:"pk;auto;column(id)" json:"id"`
JobID string `orm:"column(job_id)" json:"job_id"`
Status string `orm:"column(status)" json:"status"`
CreationTime *time.Time `orm:"column(creation_time)" json:"creation_time"`
UpdateTime *time.Time `orm:"column(update_time)" json:"update_time"`
}
func getRedisHost() string {
redisHost := os.Getenv(testingRedisHost)
if redisHost == "" {
redisHost = "127.0.0.1" // for local test
}
// ScheduleQuery is query for schedule
type ScheduleQuery struct {
JobID string
return redisHost
}

View File

@ -0,0 +1,172 @@
package redis
import (
"fmt"
"github.com/goharbor/harbor/src/lib/log"
"net/url"
"strconv"
"strings"
"sync"
"time"
"github.com/FZambia/sentinel"
"github.com/gomodule/redigo/redis"
)
var knownPool sync.Map
var m sync.Mutex
// PoolParam ...
type PoolParam struct {
PoolMaxIdle int
PoolMaxActive int
PoolIdleTimeout time.Duration
DialConnectionTimeout time.Duration
DialReadTimeout time.Duration
DialWriteTimeout time.Duration
}
// GetRedisPool gets a named redis pool
// supported rawurl formats:
// redis://user:pass@redis_host:port/db
// redis+sentinel://user:pass@redis_sentinel1:port1,redis_sentinel2:port2/monitor_name/db?idle_timeout_seconds=100
func GetRedisPool(name string, rawurl string, param *PoolParam) (*redis.Pool, error) {
if p, ok := knownPool.Load(name); ok {
return p.(*redis.Pool), nil
}
m.Lock()
defer m.Unlock()
// load again in case another goroutine created the pool while we waited for the lock
if p, ok := knownPool.Load(name); ok {
return p.(*redis.Pool), nil
}
u, err := url.Parse(rawurl)
if err != nil {
return nil, fmt.Errorf("bad redis url: %s, %s, %s", name, rawurl, err)
}
if param == nil {
param = &PoolParam{
PoolMaxIdle: 0,
PoolMaxActive: 1,
PoolIdleTimeout: time.Minute,
DialConnectionTimeout: time.Second,
DialReadTimeout: time.Second,
DialWriteTimeout: time.Second,
}
}
if t := u.Query().Get("idle_timeout_seconds"); t != "" {
if tt, e := strconv.Atoi(t); e == nil {
param.PoolIdleTimeout = time.Second * time.Duration(tt)
}
}
log.Debug("get redis pool:", name, rawurl)
if u.Scheme == "redis" {
pool := &redis.Pool{
Dial: func() (redis.Conn, error) {
return redis.DialURL(rawurl)
},
TestOnBorrow: func(c redis.Conn, t time.Time) error {
_, err := c.Do("PING")
return err
},
MaxIdle: param.PoolMaxIdle,
MaxActive: param.PoolMaxActive,
IdleTimeout: param.PoolIdleTimeout,
Wait: true,
}
knownPool.Store(name, pool)
return pool, nil
} else if u.Scheme == "redis+sentinel" {
pool, err := getSentinelPool(u, param, err, name)
if err != nil {
return nil, err
}
knownPool.Store(name, pool)
return pool, nil
} else {
return nil, fmt.Errorf("bad redis url: not support scheme %s", u.Scheme)
}
}
func getSentinelPool(u *url.URL, param *PoolParam, err error, name string) (*redis.Pool, error) {
ps := strings.Split(u.Path, "/")
if len(ps) < 2 {
return nil, fmt.Errorf("bad redis sentinel url: no master name, %s %s", name, u)
}
log.Debug("getSentinelPool:", u)
var sentinelOptions []redis.DialOption
if param.DialConnectionTimeout > 0 {
log.Debug(name, "sentinel DialConnectionTimeout:", param.DialConnectionTimeout)
sentinelOptions = append(sentinelOptions, redis.DialConnectTimeout(param.DialConnectionTimeout))
}
if param.DialReadTimeout > 0 {
log.Debug(name, "sentinel DialReadTimeout:", param.DialReadTimeout)
sentinelOptions = append(sentinelOptions, redis.DialReadTimeout(param.DialReadTimeout))
}
if param.DialWriteTimeout > 0 {
log.Debug(name, "sentinel DialWriteTimeout:", param.DialWriteTimeout)
sentinelOptions = append(sentinelOptions, redis.DialWriteTimeout(param.DialWriteTimeout))
}
redisOptions := sentinelOptions
if u.User != nil {
password, isSet := u.User.Password()
if isSet {
log.Debug(name, "redis has password")
redisOptions = append(redisOptions, redis.DialPassword(password))
}
}
// the sentinel connection itself doesn't select a db; the db is applied when dialing the master
db := 0
if len(ps) > 2 {
db, err = strconv.Atoi(ps[2])
if err != nil {
return nil, fmt.Errorf("invalid redis db: %s, %s", ps[1], name)
}
if db != 0 {
redisOptions = append(redisOptions, redis.DialDatabase(db))
}
}
sntnl := &sentinel.Sentinel{
Addrs: strings.Split(u.Host, ","),
MasterName: ps[1],
Dial: func(addr string) (redis.Conn, error) {
log.Debug(name, "dial redis sentinel:", addr)
c, err := redis.Dial("tcp", addr, sentinelOptions...)
if err != nil {
return nil, err
}
return c, nil
},
}
pool := &redis.Pool{
Dial: func() (redis.Conn, error) {
masterAddr, err := sntnl.MasterAddr()
if err != nil {
return nil, err
}
log.Debug(name, "dial redis master:", masterAddr, "db:", db)
return redis.Dial("tcp", masterAddr, redisOptions...)
},
TestOnBorrow: func(c redis.Conn, t time.Time) error {
if !sentinel.TestRole(c, "master") {
return fmt.Errorf("check role failed, %s", name)
}
return nil
},
MaxIdle: param.PoolMaxIdle,
MaxActive: param.PoolMaxActive,
IdleTimeout: param.PoolIdleTimeout,
Wait: true,
}
return pool, nil
}
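// Hedged usage sketch (illustrative names and addresses): pools are cached by
// name, so repeated calls with the same name return the same *redis.Pool.
//
//	pool, err := GetRedisPool("MyComponent", "redis://:password@127.0.0.1:6379/0", &PoolParam{
//	    PoolMaxIdle:           6,
//	    PoolMaxActive:         6,
//	    PoolIdleTimeout:       time.Minute,
//	    DialConnectionTimeout: time.Second,
//	    DialReadTimeout:       time.Second,
//	    DialWriteTimeout:      time.Second,
//	})
//
//	// sentinel form: the hosts are the sentinels, "mymaster" is the monitor
//	// name and the trailing "/1" selects the db on the master
//	haPool, err := GetRedisPool("MyComponent-HA",
//	    "redis+sentinel://:password@10.0.0.1:26379,10.0.0.2:26379/mymaster/1?idle_timeout_seconds=30", nil)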

View File

@ -22,7 +22,6 @@ import (
"strings"
"time"
beego_orm "github.com/astaxie/beego/orm"
"github.com/docker/distribution/manifest/schema2"
"github.com/goharbor/harbor/src/lib/orm"
"github.com/goharbor/harbor/src/lib/q"
@ -180,41 +179,29 @@ func (d *dao) UpdateBlobStatus(ctx context.Context, blob *models.Blob) (int64, e
return -1, err
}
// each update automatically increases the version and refreshes the update time
data := make(beego_orm.Params)
data["version"] = beego_orm.ColValue(beego_orm.ColAdd, 1)
data["update_time"] = time.Now()
data["status"] = blob.Status
qt := o.QueryTable(&models.Blob{})
cond := beego_orm.NewCondition()
var c *beego_orm.Condition
// In the multiple blob HEAD scenario, if one request succeeds in marking the blob from StatusDelete to StatusNone, the version increases by one.
// Meanwhile other requests may try to do the same thing; 'where version >= blob.version' handles that race.
var sql string
if blob.Status == models.StatusNone {
c = cond.And("version__gte", blob.Version)
sql = `UPDATE blob SET version = version + 1, update_time = ?, status = ? WHERE id = ? AND version >= ? AND status IN (%s) RETURNING version AS new_version`
} else {
c = cond.And("version", blob.Version)
sql = `UPDATE blob SET version = version + 1, update_time = ?, status = ? WHERE id = ? AND version = ? AND status IN (%s) RETURNING version AS new_version`
}
/*
For reference, the previous ORM-based implementation generated SQL similar to:
UPDATE "blob" SET "version" = "version" + $1, "update_time" = $2, "status" = $3
WHERE "id" IN ( SELECT T0."id" FROM "blob" T0 WHERE T0."version" >= $4 AND T0."id" = $5 AND T0."status" IN ('delete', 'deleting') )
*/
var newVersion int64
params := []interface{}{time.Now(), blob.Status, blob.ID, blob.Version}
stats := models.StatusMap[blob.Status]
for _, stat := range stats {
params = append(params, stat)
}
if err := o.Raw(fmt.Sprintf(sql, orm.ParamPlaceholderForIn(len(models.StatusMap[blob.Status]))), params...).QueryRow(&newVersion); err != nil {
if e := orm.AsNotFoundError(err, "no blob is updated"); e != nil {
log.Warningf("no blob is updated according to query condition, id: %d, status_in, %v, err: %v", blob.ID, models.StatusMap[blob.Status], e)
return 0, nil
}
return -1, err
}
count, err := qt.SetCond(c).Filter("id", blob.ID).
Filter("status__in", models.StatusMap[blob.Status]).
Update(data)
if err != nil {
return count, err
}
if count == 0 {
log.Warningf("no blob is updated according to query condition, id: %d, status_in, %v", blob.ID, models.StatusMap[blob.Status])
return 0, nil
}
return count, nil
blob.Version = newVersion
return 1, nil
}
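// Hedged caller sketch: the version column provides optimistic locking, so a
// returned count of 0 means another request won the race or the current status
// disallows the transition; the caller can re-read the blob and decide again.
//
//	blob.Status = models.StatusDeleting
//	count, err := d.UpdateBlobStatus(ctx, blob)
//	if err == nil && count == 0 {
//	    blob, err = d.GetBlobByDigest(ctx, blob.Digest) // re-fetch and re-evaluate
//	}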
// UpdateBlob cannot handle the status change and version increase, for handling blob status change, please call

View File

@ -169,20 +169,26 @@ func (suite *DaoTestSuite) TestUpdateBlobStatus() {
count, err := suite.dao.UpdateBlobStatus(ctx, blob)
suite.Nil(err)
suite.Equal(int64(0), count)
blob, err = suite.dao.GetBlobByDigest(ctx, digest)
if suite.Nil(err) {
suite.Equal(int64(0), blob.Version)
suite.Equal(models.StatusNone, blob.Status)
}
blob.Status = models.StatusDelete
count, err = suite.dao.UpdateBlobStatus(ctx, blob)
suite.Nil(err)
suite.Equal(int64(1), count)
blob.Status = models.StatusDeleting
count, err = suite.dao.UpdateBlobStatus(ctx, blob)
suite.Nil(err)
suite.Equal(int64(1), count)
blob.Status = models.StatusDeleteFailed
count, err = suite.dao.UpdateBlobStatus(ctx, blob)
suite.Nil(err)
suite.Equal(int64(1), count)
blob, err = suite.dao.GetBlobByDigest(ctx, digest)
if suite.Nil(err) {
suite.Equal(int64(1), blob.Version)
suite.Equal(models.StatusDelete, blob.Status)
suite.Equal(int64(3), blob.Version)
suite.Equal(models.StatusDeleteFailed, blob.Status)
}
}

View File

@ -16,6 +16,7 @@ package blob
import (
"context"
"github.com/goharbor/harbor/src/lib/errors"
"github.com/goharbor/harbor/src/pkg/blob/dao"
"github.com/goharbor/harbor/src/pkg/blob/models"
)
@ -121,6 +122,10 @@ func (m *manager) Update(ctx context.Context, blob *Blob) error {
}
func (m *manager) UpdateBlobStatus(ctx context.Context, blob *models.Blob) (int64, error) {
_, exist := models.StatusMap[blob.Status]
if !exist {
return -1, errors.New(nil).WithMessage("cannot update blob status, as the status is unknown. digest: %s, status: %s", blob.Digest, blob.Status)
}
return m.dao.UpdateBlobStatus(ctx, blob)
}

View File

@ -283,9 +283,22 @@ func (suite *ManagerTestSuite) TestUpdateStatus() {
blob, err := Mgr.Get(ctx, digest)
if suite.Nil(err) {
blob.Status = models.StatusDelete
_, err := Mgr.UpdateBlobStatus(ctx, blob)
blob.Status = "unknown"
count, err := Mgr.UpdateBlobStatus(ctx, blob)
suite.NotNil(err)
suite.Equal(int64(-1), count)
// StatusNone cannot be updated to StatusDeleting
blob.Status = models.StatusDeleting
count, err = Mgr.UpdateBlobStatus(ctx, blob)
suite.Nil(err)
suite.Equal(int64(0), count)
blob.Status = models.StatusDelete
count, err = Mgr.UpdateBlobStatus(ctx, blob)
suite.Nil(err)
suite.Equal(int64(1), count)
{
blob, err := Mgr.Get(ctx, digest)

View File

@ -16,6 +16,7 @@ package retention
import (
"fmt"
"github.com/goharbor/harbor/src/lib/orm"
"time"
"github.com/goharbor/harbor/src/pkg/project"
@ -65,7 +66,7 @@ type DefaultAPIController struct {
const (
// SchedulerCallback ...
SchedulerCallback = "SchedulerCallback"
SchedulerCallback = "RetentionCallback"
)
// TriggerParam ...
@ -84,7 +85,7 @@ func (r *DefaultAPIController) CreateRetention(p *policy.Metadata) (int64, error
if p.Trigger.Kind == policy.TriggerKindSchedule {
cron, ok := p.Trigger.Settings[policy.TriggerSettingsCron]
if ok && len(cron.(string)) > 0 {
jobid, err := r.scheduler.Schedule(cron.(string), SchedulerCallback, TriggerParam{
jobid, err := r.scheduler.Schedule(orm.Context(), cron.(string), SchedulerCallback, TriggerParam{
PolicyID: p.ID,
Trigger: ExecutionTriggerSchedule,
})
@ -142,13 +143,13 @@ func (r *DefaultAPIController) UpdateRetention(p *policy.Metadata) error {
}
}
if needUn {
err = r.scheduler.UnSchedule(p0.Trigger.References[policy.TriggerReferencesJobid].(int64))
err = r.scheduler.UnSchedule(orm.Context(), p0.Trigger.References[policy.TriggerReferencesJobid].(int64))
if err != nil {
return err
}
}
if needSch {
jobid, err := r.scheduler.Schedule(p.Trigger.Settings[policy.TriggerSettingsCron].(string), SchedulerCallback, TriggerParam{
jobid, err := r.scheduler.Schedule(orm.Context(), p.Trigger.Settings[policy.TriggerSettingsCron].(string), SchedulerCallback, TriggerParam{
PolicyID: p.ID,
Trigger: ExecutionTriggerSchedule,
})
@ -168,7 +169,7 @@ func (r *DefaultAPIController) DeleteRetention(id int64) error {
return err
}
if p.Trigger.Kind == policy.TriggerKindSchedule && len(p.Trigger.Settings[policy.TriggerSettingsCron].(string)) > 0 {
err = r.scheduler.UnSchedule(p.Trigger.References[policy.TriggerReferencesJobid].(int64))
err = r.scheduler.UnSchedule(orm.Context(), p.Trigger.References[policy.TriggerReferencesJobid].(int64))
if err != nil {
return err
}

View File

@ -1,9 +1,11 @@
package retention
import (
"context"
"github.com/goharbor/harbor/src/pkg/retention/dep"
"github.com/goharbor/harbor/src/pkg/retention/policy"
"github.com/goharbor/harbor/src/pkg/retention/policy/rule"
"github.com/goharbor/harbor/src/pkg/scheduler"
"github.com/goharbor/harbor/src/testing/pkg/repository"
"github.com/stretchr/testify/suite"
"strings"
@ -201,14 +203,18 @@ func (s *ControllerTestSuite) TestExecution() {
type fakeRetentionScheduler struct {
}
func (f *fakeRetentionScheduler) Schedule(cron string, callbackFuncName string, params interface{}) (int64, error) {
func (f *fakeRetentionScheduler) Schedule(ctx context.Context, cron string, callbackFuncName string, params interface{}) (int64, error) {
return 111, nil
}
func (f *fakeRetentionScheduler) UnSchedule(id int64) error {
func (f *fakeRetentionScheduler) UnSchedule(ctx context.Context, id int64) error {
return nil
}
func (f *fakeRetentionScheduler) GetSchedule(ctx context.Context, id int64) (*scheduler.Schedule, error) {
return nil, nil
}
type fakeLauncher struct {
}

View File

@ -0,0 +1,89 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package scheduler
import (
"context"
"fmt"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/lib/errors"
"github.com/goharbor/harbor/src/lib/log"
"github.com/goharbor/harbor/src/lib/q"
"github.com/goharbor/harbor/src/pkg/task"
)
var (
registry = make(map[string]CallbackFunc)
)
// CallbackFunc defines the function that the scheduler calls when triggered
type CallbackFunc func(interface{}) error
func init() {
if err := task.RegisterCheckInProcessor(JobNameScheduler, triggerCallback); err != nil {
log.Errorf("failed to register check in processor for scheduler: %v", err)
}
}
// RegisterCallbackFunc registers the callback function which will be called when the scheduler is triggered
func RegisterCallbackFunc(name string, callbackFunc CallbackFunc) error {
if len(name) == 0 {
return errors.New("empty name")
}
if callbackFunc == nil {
return errors.New("callback function is nil")
}
_, exist := registry[name]
if exist {
return fmt.Errorf("callback function %s already exists", name)
}
registry[name] = callbackFunc
return nil
}
func getCallbackFunc(name string) (CallbackFunc, error) {
f, exist := registry[name]
if !exist {
return nil, fmt.Errorf("callback function %s not found", name)
}
return f, nil
}
func callbackFuncExist(name string) bool {
_, exist := registry[name]
return exist
}
func triggerCallback(ctx context.Context, task *task.Task, change *job.StatusChange) (err error) {
schedules, err := Sched.(*scheduler).dao.List(ctx, &q.Query{
Keywords: map[string]interface{}{
"ExecutionID": task.ExecutionID,
},
})
if err != nil {
return err
}
if len(schedules) == 0 {
return fmt.Errorf("the schedule whose execution ID is %d not found", task.ExecutionID)
}
callbackFunc, err := getCallbackFunc(schedules[0].CallbackFuncName)
if err != nil {
return err
}
return callbackFunc(schedules[0].CallbackFuncParam)
}
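// Hedged registration sketch ("GC_CALLBACK" and gcCallback are made-up names):
// packages typically register their callback in an init() and pass the same
// name to the scheduler when creating schedules.
//
//	func init() {
//	    if err := RegisterCallbackFunc("GC_CALLBACK", gcCallback); err != nil {
//	        log.Errorf("failed to register GC callback: %v", err)
//	    }
//	}
//
//	func gcCallback(p interface{}) error {
//	    // p carries the schedule's CallbackFuncParam (a JSON string for schedules
//	    // created via the scheduler), so decode it before use
//	    return nil
//	}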

View File

@ -0,0 +1,73 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package scheduler
import (
"testing"
"github.com/stretchr/testify/suite"
)
type callbackTestSuite struct {
suite.Suite
}
func (c *callbackTestSuite) SetupTest() {
registry = map[string]CallbackFunc{}
err := RegisterCallbackFunc("callback", func(interface{}) error { return nil })
c.Require().Nil(err)
}
func (c *callbackTestSuite) TestRegisterCallbackFunc() {
// empty name
err := RegisterCallbackFunc("", nil)
c.NotNil(err)
// nil callback function
err = RegisterCallbackFunc("test", nil)
c.NotNil(err)
// pass
err = RegisterCallbackFunc("test", func(interface{}) error { return nil })
c.Nil(err)
// duplicate name
err = RegisterCallbackFunc("test", func(interface{}) error { return nil })
c.NotNil(err)
}
func (c *callbackTestSuite) TestGetCallbackFunc() {
// not exist
_, err := getCallbackFunc("not-exist")
c.NotNil(err)
// pass
f, err := getCallbackFunc("callback")
c.Require().Nil(err)
c.NotNil(f)
}
func (c *callbackTestSuite) TestCallbackFuncExist() {
// not exist
c.False(callbackFuncExist("not-exist"))
// exist
c.True(callbackFuncExist("callback"))
}
func TestCallbackTestSuite(t *testing.T) {
s := &callbackTestSuite{}
suite.Run(t, s)
}

128
src/pkg/scheduler/dao.go Normal file
View File

@ -0,0 +1,128 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package scheduler
import (
"context"
"time"
beegoorm "github.com/astaxie/beego/orm"
"github.com/goharbor/harbor/src/lib/errors"
"github.com/goharbor/harbor/src/lib/orm"
"github.com/goharbor/harbor/src/lib/q"
)
func init() {
beegoorm.RegisterModel(&schedule{})
}
type schedule struct {
ID int64 `orm:"pk;auto;column(id)"`
CRON string `orm:"column(cron)"`
ExecutionID int64 `orm:"column(execution_id)"`
CallbackFuncName string `orm:"column(callback_func_name)"`
CallbackFuncParam string `orm:"column(callback_func_param)"`
CreationTime time.Time `orm:"column(creation_time)"`
UpdateTime time.Time `orm:"column(update_time)"`
}
// DAO is the data access object interface for schedule
type DAO interface {
Create(ctx context.Context, schedule *schedule) (id int64, err error)
List(ctx context.Context, query *q.Query) (schedules []*schedule, err error)
Get(ctx context.Context, id int64) (schedule *schedule, err error)
Delete(ctx context.Context, id int64) (err error)
Update(ctx context.Context, schedule *schedule, props ...string) (err error)
}
type dao struct{}
func (d *dao) Create(ctx context.Context, schedule *schedule) (int64, error) {
ormer, err := orm.FromContext(ctx)
if err != nil {
return 0, err
}
id, err := ormer.Insert(schedule)
if err != nil {
if e := orm.AsForeignKeyError(err,
"the schedule tries to reference a non existing execution %d", schedule.ExecutionID); e != nil {
err = e
}
return 0, err
}
return id, nil
}
func (d *dao) List(ctx context.Context, query *q.Query) ([]*schedule, error) {
qs, err := orm.QuerySetter(ctx, &schedule{}, query)
if err != nil {
return nil, err
}
schedules := []*schedule{}
if _, err = qs.All(&schedules); err != nil {
return nil, err
}
return schedules, nil
}
func (d *dao) Get(ctx context.Context, id int64) (*schedule, error) {
ormer, err := orm.FromContext(ctx)
if err != nil {
return nil, err
}
schedule := &schedule{
ID: id,
}
if err = ormer.Read(schedule); err != nil {
if e := orm.AsNotFoundError(err, "schedule %d not found", id); e != nil {
err = e
}
return nil, err
}
return schedule, nil
}
func (d *dao) Delete(ctx context.Context, id int64) error {
ormer, err := orm.FromContext(ctx)
if err != nil {
return err
}
n, err := ormer.Delete(&schedule{
ID: id,
})
if err != nil {
return err
}
if n == 0 {
return errors.NotFoundError(nil).WithMessage("schedule %d not found", id)
}
return nil
}
func (d *dao) Update(ctx context.Context, schedule *schedule, props ...string) error {
ormer, err := orm.FromContext(ctx)
if err != nil {
return err
}
n, err := ormer.Update(schedule, props...)
if err != nil {
return err
}
if n == 0 {
return errors.NotFoundError(nil).WithMessage("schedule %d not found", schedule.ID)
}
return nil
}

View File

@ -1,99 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dao
import (
"errors"
"fmt"
"time"
"github.com/astaxie/beego/orm"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/pkg/scheduler/model"
)
// ScheduleDao defines the method that a schedule data access model should implement
type ScheduleDao interface {
Create(*model.Schedule) (int64, error)
Update(*model.Schedule, ...string) error
Delete(int64) error
Get(int64) (*model.Schedule, error)
List(...*model.ScheduleQuery) ([]*model.Schedule, error)
}
// New returns an instance of the default schedule data access model implementation
func New() ScheduleDao {
return &scheduleDao{}
}
type scheduleDao struct{}
func (s *scheduleDao) Create(schedule *model.Schedule) (int64, error) {
if schedule == nil {
return 0, errors.New("nil schedule")
}
now := time.Now()
schedule.CreationTime = &now
schedule.UpdateTime = &now
return dao.GetOrmer().Insert(schedule)
}
func (s *scheduleDao) Update(schedule *model.Schedule, cols ...string) error {
if schedule == nil {
return errors.New("nil schedule")
}
if schedule.ID <= 0 {
return fmt.Errorf("invalid ID: %d", schedule.ID)
}
now := time.Now()
schedule.UpdateTime = &now
_, err := dao.GetOrmer().Update(schedule, cols...)
return err
}
func (s *scheduleDao) Delete(id int64) error {
_, err := dao.GetOrmer().Delete(&model.Schedule{
ID: id,
})
return err
}
func (s *scheduleDao) Get(id int64) (*model.Schedule, error) {
schedule := &model.Schedule{
ID: id,
}
if err := dao.GetOrmer().Read(schedule); err != nil {
if err == orm.ErrNoRows {
return nil, nil
}
return nil, err
}
return schedule, nil
}
func (s *scheduleDao) List(query ...*model.ScheduleQuery) ([]*model.Schedule, error) {
qs := dao.GetOrmer().QueryTable(&model.Schedule{})
if len(query) > 0 && query[0] != nil {
if len(query[0].JobID) > 0 {
qs = qs.Filter("JobID", query[0].JobID)
}
}
schedules := []*model.Schedule{}
_, err := qs.All(&schedules)
if err != nil {
return nil, err
}
return schedules, nil
}

View File

@ -1,122 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dao
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/pkg/scheduler/model"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
)
var schDao = &scheduleDao{}
type scheduleTestSuite struct {
suite.Suite
scheduleID int64
}
func (s *scheduleTestSuite) SetupSuite() {
dao.PrepareTestForPostgresSQL()
}
func (s *scheduleTestSuite) SetupTest() {
t := s.T()
id, err := schDao.Create(&model.Schedule{
JobID: "1",
Status: "pending",
})
require.Nil(t, err)
s.scheduleID = id
}
func (s *scheduleTestSuite) TearDownTest() {
// clear
dao.GetOrmer().Raw("delete from schedule").Exec()
}
func (s *scheduleTestSuite) TestCreate() {
t := s.T()
// nil schedule
_, err := schDao.Create(nil)
require.NotNil(t, err)
// pass
_, err = schDao.Create(&model.Schedule{
JobID: "1",
})
require.Nil(t, err)
}
func (s *scheduleTestSuite) TestUpdate() {
t := s.T()
// nil schedule
err := schDao.Update(nil)
require.NotNil(t, err)
// invalid ID
err = schDao.Update(&model.Schedule{})
require.NotNil(t, err)
// pass
err = schDao.Update(&model.Schedule{
ID: s.scheduleID,
Status: "running",
})
require.Nil(t, err)
schedule, err := schDao.Get(s.scheduleID)
require.Nil(t, err)
assert.Equal(t, "running", schedule.Status)
}
func (s *scheduleTestSuite) TestDelete() {
t := s.T()
err := schDao.Delete(s.scheduleID)
require.Nil(t, err)
schedule, err := schDao.Get(s.scheduleID)
require.Nil(t, err)
assert.Nil(t, schedule)
}
func (s *scheduleTestSuite) TestGet() {
t := s.T()
schedule, err := schDao.Get(s.scheduleID)
require.Nil(t, err)
assert.Equal(t, "pending", schedule.Status)
}
func (s *scheduleTestSuite) TestList() {
t := s.T()
// nil query
schedules, err := schDao.List()
require.Nil(t, err)
require.Equal(t, 1, len(schedules))
assert.Equal(t, s.scheduleID, schedules[0].ID)
// query by job ID
schedules, err = schDao.List(&model.ScheduleQuery{
JobID: "1",
})
require.Nil(t, err)
require.Equal(t, 1, len(schedules))
assert.Equal(t, s.scheduleID, schedules[0].ID)
}
func TestScheduleDao(t *testing.T) {
suite.Run(t, &scheduleTestSuite{})
}

View File

@ -0,0 +1,128 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package scheduler
import (
"context"
"testing"
common_dao "github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/lib/errors"
"github.com/goharbor/harbor/src/lib/orm"
"github.com/goharbor/harbor/src/lib/q"
"github.com/goharbor/harbor/src/pkg/task"
"github.com/stretchr/testify/suite"
)
type daoTestSuite struct {
suite.Suite
dao DAO
execMgr task.ExecutionManager
ctx context.Context
id int64
execID int64
}
func (d *daoTestSuite) SetupSuite() {
d.dao = &dao{}
d.execMgr = task.NewExecutionManager()
common_dao.PrepareTestForPostgresSQL()
d.ctx = orm.Context()
}
func (d *daoTestSuite) SetupTest() {
execID, err := d.execMgr.Create(d.ctx, "vendor", 0, "trigger")
d.Require().Nil(err)
d.execID = execID
schedule := &schedule{
CRON: "0 * * * * *",
ExecutionID: execID,
CallbackFuncName: "callback_func_01",
CallbackFuncParam: "callback_func_params",
}
id, err := d.dao.Create(d.ctx, schedule)
d.Require().Nil(err)
d.id = id
}
func (d *daoTestSuite) TearDownTest() {
d.Require().Nil(d.dao.Delete(d.ctx, d.id))
d.Require().Nil(d.execMgr.Delete(d.ctx, d.execID))
}
func (d *daoTestSuite) TestCreate() {
// the happy path is covered in SetupTest
// foreign key error
_, err := d.dao.Create(d.ctx, &schedule{
CRON: "0 * * * * *",
ExecutionID: 10000,
CallbackFuncName: "callback_func",
})
d.True(errors.IsErr(err, errors.ViolateForeignKeyConstraintCode))
}
func (d *daoTestSuite) TestList() {
schedules, err := d.dao.List(d.ctx, &q.Query{
Keywords: map[string]interface{}{
"CallbackFuncName": "callback_func_01",
},
})
d.Require().Nil(err)
d.Require().Len(schedules, 1)
d.Equal(d.id, schedules[0].ID)
}
func (d *daoTestSuite) TestGet() {
// not found
schedule, err := d.dao.Get(d.ctx, 10000)
d.True(errors.IsNotFoundErr(err))
// pass
schedule, err = d.dao.Get(d.ctx, d.id)
d.Require().Nil(err)
d.Equal(d.id, schedule.ID)
}
func (d *daoTestSuite) TestDelete() {
// the happy path is covered in TearDownTest
// not found
err := d.dao.Delete(d.ctx, 10000)
d.True(errors.IsNotFoundErr(err))
}
func (d *daoTestSuite) TestUpdate() {
// not found
err := d.dao.Update(d.ctx, &schedule{
ID: 10000,
})
d.True(errors.IsNotFoundErr(err))
// pass
err = d.dao.Update(d.ctx, &schedule{
ID: d.id,
CRON: "* */2 * * * *",
}, "CRON")
d.Require().Nil(err)
schedule, err := d.dao.Get(d.ctx, d.id)
d.Require().Nil(err)
d.Equal("* */2 * * * *", schedule.CRON)
}
func TestDaoTestSuite(t *testing.T) {
suite.Run(t, &daoTestSuite{})
}

View File

@ -1,59 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package hook
import (
"time"
"github.com/goharbor/harbor/src/pkg/scheduler"
"github.com/goharbor/harbor/src/pkg/scheduler/model"
)
// GlobalController is an instance of the default controller that can be used globally
var GlobalController = NewController()
// Controller updates the scheduler job status or runs the callback function
type Controller interface {
UpdateStatus(scheduleID int64, status string) error
Run(callbackFuncName string, params interface{}) error
}
// NewController returns an instance of the default controller
func NewController() Controller {
return &controller{
manager: scheduler.GlobalManager,
}
}
type controller struct {
manager scheduler.Manager
}
func (c *controller) UpdateStatus(scheduleID int64, status string) error {
now := time.Now()
return c.manager.Update(&model.Schedule{
ID: scheduleID,
Status: status,
UpdateTime: &now,
}, "Status", "UpdateTime")
}
func (c *controller) Run(callbackFuncName string, params interface{}) error {
f, err := scheduler.GetCallbackFunc(callbackFuncName)
if err != nil {
return err
}
return f(params)
}

View File

@ -1,55 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package hook
import (
"github.com/goharbor/harbor/src/pkg/scheduler"
"github.com/goharbor/harbor/src/pkg/scheduler/model"
schedulertesting "github.com/goharbor/harbor/src/testing/pkg/scheduler"
"github.com/stretchr/testify/require"
"testing"
)
var h = &controller{
manager: &schedulertesting.FakeManager{},
}
func TestUpdateStatus(t *testing.T) {
// task not exist
err := h.UpdateStatus(1, "running")
require.NotNil(t, err)
// pass
h.manager.(*schedulertesting.FakeManager).Schedules = []*model.Schedule{
{
ID: 1,
Status: "",
},
}
err = h.UpdateStatus(1, "running")
require.Nil(t, err)
}
func TestRun(t *testing.T) {
// callback function not exist
err := h.Run("not-exist", nil)
require.NotNil(t, err)
// pass
err = scheduler.Register("callback", func(interface{}) error { return nil })
require.Nil(t, err)
err = h.Run("callback", nil)
require.Nil(t, err)
}

View File

@ -1,66 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package scheduler
import (
"github.com/goharbor/harbor/src/pkg/scheduler/dao"
"github.com/goharbor/harbor/src/pkg/scheduler/model"
)
var (
// GlobalManager is an instance of the default manager that
// can be used globally
GlobalManager = NewManager()
)
// Manager manages the schedule of the scheduler
type Manager interface {
Create(*model.Schedule) (int64, error)
Update(*model.Schedule, ...string) error
Delete(int64) error
Get(int64) (*model.Schedule, error)
List(...*model.ScheduleQuery) ([]*model.Schedule, error)
}
// NewManager returns an instance of the default manager
func NewManager() Manager {
return &manager{
scheduleDao: dao.New(),
}
}
type manager struct {
scheduleDao dao.ScheduleDao
}
func (m *manager) Create(schedule *model.Schedule) (int64, error) {
return m.scheduleDao.Create(schedule)
}
func (m *manager) Update(schedule *model.Schedule, props ...string) error {
return m.scheduleDao.Update(schedule, props...)
}
func (m *manager) Delete(id int64) error {
return m.scheduleDao.Delete(id)
}
func (m *manager) List(query ...*model.ScheduleQuery) ([]*model.Schedule, error) {
return m.scheduleDao.List(query...)
}
func (m *manager) Get(id int64) (*model.Schedule, error) {
return m.scheduleDao.Get(id)
}

View File

@ -1,110 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package scheduler
import (
"testing"
"github.com/goharbor/harbor/src/pkg/scheduler/model"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/suite"
)
var mgr *manager
type fakeScheduleDao struct {
schedules []*model.Schedule
mock.Mock
}
func (f *fakeScheduleDao) Create(*model.Schedule) (int64, error) {
f.Called()
return 1, nil
}
func (f *fakeScheduleDao) Update(*model.Schedule, ...string) error {
f.Called()
return nil
}
func (f *fakeScheduleDao) Delete(int64) error {
f.Called()
return nil
}
func (f *fakeScheduleDao) Get(int64) (*model.Schedule, error) {
f.Called()
return nil, nil
}
func (f *fakeScheduleDao) List(query ...*model.ScheduleQuery) ([]*model.Schedule, error) {
f.Called()
if len(query) == 0 || query[0] == nil {
return f.schedules, nil
}
result := []*model.Schedule{}
for _, sch := range f.schedules {
if sch.JobID == query[0].JobID {
result = append(result, sch)
}
}
return result, nil
}
type managerTestSuite struct {
suite.Suite
}
func (m *managerTestSuite) SetupTest() {
// recreate schedule manager
mgr = &manager{
scheduleDao: &fakeScheduleDao{},
}
}
func (m *managerTestSuite) TestCreate() {
t := m.T()
mgr.scheduleDao.(*fakeScheduleDao).On("Create", mock.Anything)
mgr.Create(nil)
mgr.scheduleDao.(*fakeScheduleDao).AssertCalled(t, "Create")
}
func (m *managerTestSuite) TestUpdate() {
t := m.T()
mgr.scheduleDao.(*fakeScheduleDao).On("Update", mock.Anything)
mgr.Update(nil)
mgr.scheduleDao.(*fakeScheduleDao).AssertCalled(t, "Update")
}
func (m *managerTestSuite) TestDelete() {
t := m.T()
mgr.scheduleDao.(*fakeScheduleDao).On("Delete", mock.Anything)
mgr.Delete(1)
mgr.scheduleDao.(*fakeScheduleDao).AssertCalled(t, "Delete")
}
func (m *managerTestSuite) TestGet() {
t := m.T()
mgr.scheduleDao.(*fakeScheduleDao).On("Get", mock.Anything)
mgr.Get(1)
mgr.scheduleDao.(*fakeScheduleDao).AssertCalled(t, "Get")
}
func (m *managerTestSuite) TestList() {
t := m.T()
mgr.scheduleDao.(*fakeScheduleDao).On("List", mock.Anything)
mgr.List(nil)
mgr.scheduleDao.(*fakeScheduleDao).AssertCalled(t, "List")
}
func TestManager(t *testing.T) {
suite.Run(t, &managerTestSuite{})
}

17
src/pkg/scheduler/mock.go Normal file
View File

@ -0,0 +1,17 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package scheduler
//go:generate mockery -name DAO -output . -outpkg scheduler -filename mock_dao_test.go -structname mockDAO -inpkg
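// Hedged test sketch: the generated mockDAO embeds mock.Mock, so expectations
// are set the usual testify way (the values below are made up):
//
//	d := &mockDAO{}
//	d.On("Get", mock.Anything, int64(1)).Return(&schedule{ID: 1}, nil)
//	schd, err := d.Get(ctx, 1)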

View File

@ -0,0 +1,117 @@
// Code generated by mockery v1.1.2. DO NOT EDIT.
package scheduler
import (
context "context"
q "github.com/goharbor/harbor/src/lib/q"
mock "github.com/stretchr/testify/mock"
)
// mockDAO is an autogenerated mock type for the DAO type
type mockDAO struct {
mock.Mock
}
// Create provides a mock function with given fields: ctx, schedule
func (_m *mockDAO) Create(ctx context.Context, schd *schedule) (int64, error) {
ret := _m.Called(ctx, schd)
var r0 int64
if rf, ok := ret.Get(0).(func(context.Context, *schedule) int64); ok {
r0 = rf(ctx, schd)
} else {
r0 = ret.Get(0).(int64)
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *schedule) error); ok {
r1 = rf(ctx, schd)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Delete provides a mock function with given fields: ctx, id
func (_m *mockDAO) Delete(ctx context.Context, id int64) error {
ret := _m.Called(ctx, id)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, int64) error); ok {
r0 = rf(ctx, id)
} else {
r0 = ret.Error(0)
}
return r0
}
// Get provides a mock function with given fields: ctx, id
func (_m *mockDAO) Get(ctx context.Context, id int64) (*schedule, error) {
ret := _m.Called(ctx, id)
var r0 *schedule
if rf, ok := ret.Get(0).(func(context.Context, int64) *schedule); ok {
r0 = rf(ctx, id)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*schedule)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, int64) error); ok {
r1 = rf(ctx, id)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// List provides a mock function with given fields: ctx, query
func (_m *mockDAO) List(ctx context.Context, query *q.Query) ([]*schedule, error) {
ret := _m.Called(ctx, query)
var r0 []*schedule
if rf, ok := ret.Get(0).(func(context.Context, *q.Query) []*schedule); ok {
r0 = rf(ctx, query)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*schedule)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *q.Query) error); ok {
r1 = rf(ctx, query)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Update provides a mock function with given fields: ctx, schedule, props
func (_m *mockDAO) Update(ctx context.Context, schd *schedule, props ...string) error {
_va := make([]interface{}, len(props))
for _i := range props {
_va[_i] = props[_i]
}
var _ca []interface{}
_ca = append(_ca, ctx, schd)
_ca = append(_ca, _va...)
ret := _m.Called(_ca...)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, *schedule, ...string) error); ok {
r0 = rf(ctx, schd, props...)
} else {
r0 = ret.Error(0)
}
return r0
}

View File

@ -15,8 +15,6 @@
package scheduler
import (
"encoding/json"
"github.com/goharbor/harbor/src/jobservice/job"
)
@ -51,9 +49,5 @@ func (pj *PeriodicJob) Validate(params job.Parameters) error {
// Run the job
func (pj *PeriodicJob) Run(ctx job.Context, params job.Parameters) error {
data, err := json.Marshal(params)
if err != nil {
return err
}
return ctx.Checkin(string(data))
return ctx.Checkin("checkin")
}

View File

@ -15,194 +15,201 @@
package scheduler
import (
"context"
"encoding/json"
"fmt"
"net/http"
"sync"
"time"
chttp "github.com/goharbor/harbor/src/common/http"
"github.com/goharbor/harbor/src/common/job"
"github.com/goharbor/harbor/src/common/job/models"
"github.com/goharbor/harbor/src/core/config"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/lib/errors"
"github.com/goharbor/harbor/src/lib/log"
"github.com/goharbor/harbor/src/pkg/scheduler/model"
)
// const definitions
const (
JobParamCallbackFunc = "callback_func"
JobParamCallbackFuncParams = "params"
"github.com/goharbor/harbor/src/lib/q"
"github.com/goharbor/harbor/src/pkg/task"
cronlib "github.com/robfig/cron"
)
var (
// GlobalScheduler is an instance of the default scheduler that
// can be used globally. Call Init() to initialize it first
GlobalScheduler Scheduler
registry = make(map[string]CallbackFunc)
// Sched is an instance of the default scheduler that can be used globally
Sched = New()
)
// CallbackFunc defines the function that the scheduler calls when triggered
type CallbackFunc func(interface{}) error
// Schedule describes the detail information about the created schedule
type Schedule struct {
ID int64 `json:"id"`
CRON string `json:"cron"`
Status string `json:"status"` // status of the underlying task(jobservice job)
CreationTime time.Time `json:"creation_time"`
UpdateTime time.Time `json:"update_time"`
// we can extend this model to include more information (e.g. how many times the schedule
// has already run; when the schedule will run next)
}
// Scheduler provides the capability to run a periodic task; a callback function
// needs to be registered before using the scheduler.
// The "params" is passed to the callback function specified by "callbackFuncName"
// as an encoded JSON string, so the callback function must decode it before use.
type Scheduler interface {
Schedule(cron string, callbackFuncName string, params interface{}) (int64, error)
UnSchedule(id int64) error
}
// Register the callback function with name, and the function will be called
// by the scheduler when the scheduler is triggered
func Register(name string, callbackFunc CallbackFunc) error {
if len(name) == 0 {
return errors.New("empty name")
}
if callbackFunc == nil {
return errors.New("callback function is nil")
}
_, exist := registry[name]
if exist {
return fmt.Errorf("callback function %s already exists", name)
}
registry[name] = callbackFunc
return nil
}
// GetCallbackFunc returns the registered callback function specified by the name
func GetCallbackFunc(name string) (CallbackFunc, error) {
f, exist := registry[name]
if !exist {
return nil, fmt.Errorf("callback function %s not found", name)
}
return f, nil
}
func callbackFuncExist(name string) bool {
_, exist := registry[name]
return exist
}
// Init the GlobalScheduler
func Init() {
GlobalScheduler = New(config.InternalCoreURL())
// Schedule creates a task which calls the specified callback function periodically.
// The callback function needs to be registered first.
// The "params" is passed to the callback function as an encoded JSON string, so the
// callback function must decode it before use.
Schedule(ctx context.Context, cron string, callbackFuncName string, params interface{}) (int64, error)
// UnSchedule the created schedule instance
UnSchedule(ctx context.Context, id int64) error
// GetSchedule gets the schedule specified by ID
GetSchedule(ctx context.Context, id int64) (*Schedule, error)
}
// New returns an instance of the default scheduler
func New(internalCoreURL string) Scheduler {
func New() Scheduler {
return &scheduler{
internalCoreURL: internalCoreURL,
jobserviceClient: job.GlobalClient,
manager: GlobalManager,
dao: &dao{},
execMgr: task.ExecMgr,
taskMgr: task.Mgr,
}
}
type scheduler struct {
sync.RWMutex
internalCoreURL string
manager Manager
jobserviceClient job.Client
dao DAO
execMgr task.ExecutionManager
taskMgr task.Manager
}
func (s *scheduler) Schedule(cron string, callbackFuncName string, params interface{}) (int64, error) {
func (s *scheduler) Schedule(ctx context.Context, cron string, callbackFuncName string, params interface{}) (int64, error) {
if _, err := cronlib.Parse(cron); err != nil {
return 0, errors.New(nil).WithCode(errors.BadRequestCode).
WithMessage("invalid cron %s: %v", cron, err)
}
if !callbackFuncExist(callbackFuncName) {
return 0, fmt.Errorf("callback function %s not found", callbackFuncName)
}
// create schedule record
execID, err := s.execMgr.Create(ctx, JobNameScheduler, 0, task.ExecutionTriggerManual)
if err != nil {
return 0, err
}
now := time.Now()
scheduleID, err := s.manager.Create(&model.Schedule{
CreationTime: &now,
UpdateTime: &now,
sched := &schedule{
CRON: cron,
ExecutionID: execID,
CallbackFuncName: callbackFuncName,
CreationTime: now,
UpdateTime: now,
}
if params != nil {
paramsData, err := json.Marshal(params)
if err != nil {
return 0, err
}
sched.CallbackFuncParam = string(paramsData)
}
// create schedule record
// when the status/checkin hook comes, the database record must exist,
// so the record must be created before the job is submitted
id, err := s.dao.Create(ctx, sched)
if err != nil {
return 0, err
}
taskID, err := s.taskMgr.Create(ctx, execID, &task.Job{
Name: JobNameScheduler,
Metadata: &job.Metadata{
JobKind: job.KindPeriodic,
Cron: cron,
},
})
if err != nil {
return 0, err
}
// if got error in the following steps, delete the schedule record in database
defer func() {
if err != nil {
e := s.manager.Delete(scheduleID)
if e != nil {
log.Errorf("failed to delete the schedule %d: %v", scheduleID, e)
}
}
}()
log.Debugf("the schedule record %d created", scheduleID)
// submit scheduler job to Jobservice
statusHookURL := fmt.Sprintf("%s/service/notifications/schedules/%d", s.internalCoreURL, scheduleID)
jd := &models.JobData{
Name: JobNameScheduler,
Parameters: map[string]interface{}{
JobParamCallbackFunc: callbackFuncName,
},
Metadata: &models.JobMetadata{
JobKind: job.JobKindPeriodic,
Cron: cron,
},
StatusHook: statusHookURL,
}
if params != nil {
var paramsData []byte
paramsData, err = json.Marshal(params)
if err != nil {
return 0, err
}
jd.Parameters[JobParamCallbackFuncParams] = string(paramsData)
}
jobID, err := s.jobserviceClient.SubmitJob(jd)
// when the task manager creates a task, it creates the task database record first and
// then submits the job to jobservice. If the submission fails, it doesn't return
// an error, so we check the task status here to make sure the job was submitted
// to jobservice successfully
task, err := s.taskMgr.Get(ctx, taskID)
if err != nil {
return 0, err
}
// if got error in the following steps, stop the scheduler job
defer func() {
if err != nil {
if e := s.jobserviceClient.PostAction(jobID, job.JobActionStop); e != nil {
log.Errorf("failed to stop the scheduler job %s: %v", jobID, e)
}
}
}()
log.Debugf("the scheduler job submitted to Jobservice, job ID: %s", jobID)
// populate the job ID for the schedule
err = s.manager.Update(&model.Schedule{
ID: scheduleID,
JobID: jobID,
}, "JobID")
if err != nil {
return 0, err
if task.Status == job.ErrorStatus.String() {
return 0, fmt.Errorf("failed to create the schedule: the task status is %s", job.ErrorStatus.String())
}
return scheduleID, nil
return id, nil
}
func (s *scheduler) UnSchedule(id int64) error {
schedule, err := s.manager.Get(id)
func (s *scheduler) UnSchedule(ctx context.Context, id int64) error {
schedule, err := s.dao.Get(ctx, id)
if err != nil {
if errors.IsNotFoundErr(err) {
log.Warningf("trying to unschedule a non existing schedule %d, skip directly", id)
return nil
}
return err
}
if schedule == nil {
log.Warningf("the schedule record %d not found", id)
return nil
if err = s.execMgr.Stop(ctx, schedule.ExecutionID); err != nil {
return err
}
if err = s.jobserviceClient.PostAction(schedule.JobID, job.JobActionStop); err != nil {
herr, ok := err.(*chttp.Error)
// if the job specified by jobID is not found in Jobservice, just delete
// the schedule record
if !ok || herr.Code != http.StatusNotFound {
// after the stop called, the execution cannot be stopped immediately,
// use the for loop to make sure the execution be in final status before deleting it
for t := 100 * time.Microsecond; t < 5*time.Second; t = t * 2 {
exec, err := s.execMgr.Get(ctx, schedule.ExecutionID)
if err != nil {
return err
}
if job.Status(exec.Status).Final() {
// delete schedule record
if err = s.dao.Delete(ctx, id); err != nil {
return err
}
// delete execution
return s.execMgr.Delete(ctx, schedule.ExecutionID)
}
time.Sleep(t)
}
log.Debugf("the stop action for job %s submitted to the Jobservice", schedule.JobID)
if err = s.manager.Delete(schedule.ID); err != nil {
return fmt.Errorf("failed to unschedule the schedule %d: the execution isn't in final status", id)
}
func (s *scheduler) GetSchedule(ctx context.Context, id int64) (*Schedule, error) {
schedule, err := s.dao.Get(ctx, id)
if err != nil {
return nil, err
}
schd := &Schedule{
ID: schedule.ID,
CRON: schedule.CRON,
CreationTime: schedule.CreationTime,
UpdateTime: schedule.UpdateTime,
}
exec, err := s.execMgr.Get(ctx, schedule.ExecutionID)
if err != nil {
return nil, err
}
schd.Status = exec.Status
return schd, nil
}
// HandleLegacyHook handles the legacy web hook for scheduler
// We rewrite the implementation of scheduler with task manager mechanism in v2.1,
// this method is used to handle the job status hook for the legacy implementation
// We can remove the method and the hook endpoint after several releases
func HandleLegacyHook(ctx context.Context, scheduleID int64, sc *job.StatusChange) error {
scheduler := Sched.(*scheduler)
schedule, err := scheduler.dao.Get(ctx, scheduleID)
if err != nil {
return err
}
tasks, err := scheduler.taskMgr.List(ctx, &q.Query{
Keywords: map[string]interface{}{
"ExecutionID": schedule.ExecutionID,
},
})
if err != nil {
return err
}
if len(tasks) == 0 {
return errors.New(nil).WithCode(errors.NotFoundCode).
WithMessage("no task references the execution %d", schedule.ExecutionID)
}
return task.NewHookHandler().Handle(ctx, tasks[0].ID, sc)
}
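For orientation, here is a minimal sketch of how a caller would consume this API; the callback name "GC_CALLBACK" and the wrapper functions are hypothetical, while RegisterCallbackFunc, Sched and the Schedule signature come from the code above:
func init() {
	// register the callback once; scheduling refers to it by name
	if err := RegisterCallbackFunc("GC_CALLBACK", gcCallback); err != nil {
		log.Errorf("failed to register the callback function: %v", err)
	}
}
func gcCallback(params interface{}) error {
	// run the garbage collection with the given params
	return nil
}
func scheduleGC(ctx context.Context) (int64, error) {
	// 6-field cron, as used elsewhere in this package: run at the top of every hour
	return Sched.Schedule(ctx, "0 0 * * * *", "GC_CALLBACK", map[string]interface{}{"dry_run": false})
}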

View File

@ -15,97 +15,149 @@
package scheduler
import (
"github.com/goharbor/harbor/src/testing/job"
schedulertesting "github.com/goharbor/harbor/src/testing/pkg/scheduler"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/lib/errors"
"github.com/goharbor/harbor/src/pkg/task"
"github.com/goharbor/harbor/src/testing/mock"
tasktesting "github.com/goharbor/harbor/src/testing/pkg/task"
"github.com/stretchr/testify/suite"
"testing"
)
var sch *scheduler
type schedulerTestSuite struct {
suite.Suite
scheduler *scheduler
dao *mockDAO
execMgr *tasktesting.FakeExecutionManager
taskMgr *tasktesting.FakeManager
}
func (s *schedulerTestSuite) SetupTest() {
	// empty the callback function registry before running every test case
	// and register a new callback function named "callback"
	registry = map[string]CallbackFunc{}
	err := RegisterCallbackFunc("callback", func(interface{}) error { return nil })
	s.Require().Nil(err)
	// recreate the scheduler object
	s.dao = &mockDAO{}
	s.execMgr = &tasktesting.FakeExecutionManager{}
	s.taskMgr = &tasktesting.FakeManager{}
	s.scheduler = &scheduler{
		dao:     s.dao,
		execMgr: s.execMgr,
		taskMgr: s.taskMgr,
	}
}
func (s *schedulerTestSuite) TestRegister() {
t := s.T()
var name string
var callbackFun CallbackFunc
// empty name
err := Register(name, callbackFun)
require.NotNil(t, err)
// nil callback function
name = "test"
err = Register(name, callbackFun)
require.NotNil(t, err)
// pass
callbackFun = func(interface{}) error { return nil }
err = Register(name, callbackFun)
require.Nil(t, err)
// duplicate name
err = Register(name, callbackFun)
require.NotNil(t, err)
}
func (s *schedulerTestSuite) TestGetCallbackFunc() {
t := s.T()
// not exist
_, err := GetCallbackFunc("not-exist")
require.NotNil(t, err)
// pass
f, err := GetCallbackFunc("callback")
require.Nil(t, err)
assert.NotNil(t, f)
}
func (s *schedulerTestSuite) TestSchedule() {
	// invalid cron
	id, err := s.scheduler.Schedule(nil, "", "callback", nil)
	s.NotNil(err)
	// callback function not exist
	id, err = s.scheduler.Schedule(nil, "0 * * * * *", "not-exist", nil)
	s.NotNil(err)
// failed to submit to jobservice
s.execMgr.On("Create", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(int64(1), nil)
s.dao.On("Create", mock.Anything, mock.Anything).Return(int64(1), nil)
s.taskMgr.On("Create", mock.Anything, mock.Anything, mock.Anything).Return(int64(1), nil)
s.taskMgr.On("Get", mock.Anything, mock.Anything).Return(&task.Task{
ID: 1,
ExecutionID: 1,
Status: job.ErrorStatus.String(),
}, nil)
_, err = s.scheduler.Schedule(nil, "0 * * * * *", "callback", "param")
s.Require().NotNil(err)
s.dao.AssertExpectations(s.T())
s.execMgr.AssertExpectations(s.T())
s.taskMgr.AssertExpectations(s.T())
// reset mocks
s.SetupTest()
// pass
s.execMgr.On("Create", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(int64(1), nil)
s.dao.On("Create", mock.Anything, mock.Anything).Return(int64(1), nil)
s.taskMgr.On("Create", mock.Anything, mock.Anything, mock.Anything).Return(int64(1), nil)
s.taskMgr.On("Get", mock.Anything, mock.Anything).Return(&task.Task{
ID: 1,
ExecutionID: 1,
Status: job.SuccessStatus.String(),
}, nil)
id, err = s.scheduler.Schedule(nil, "0 * * * * *", "callback", "param")
s.Require().Nil(err)
s.Equal(int64(1), id)
s.dao.AssertExpectations(s.T())
s.execMgr.AssertExpectations(s.T())
s.taskMgr.AssertExpectations(s.T())
}
func (s *schedulerTestSuite) TestUnSchedule() {
	// not existing schedule
	s.dao.On("Get", mock.Anything, mock.Anything).Return(nil, errors.NotFoundError(nil))
	err := s.scheduler.UnSchedule(nil, 10000)
	s.Nil(err)
	s.dao.AssertExpectations(s.T())
	// reset mocks
	s.SetupTest()
// the underlying task isn't stopped
s.dao.On("Get", mock.Anything, mock.Anything).Return(&schedule{
ID: 1,
CRON: "0 * * * * *",
ExecutionID: 1,
CallbackFuncName: "callback",
}, nil)
s.execMgr.On("Stop", mock.Anything, mock.Anything).Return(nil)
s.execMgr.On("Get", mock.Anything, mock.Anything).Return(&task.Execution{
ID: 1,
Status: job.RunningStatus.String(),
}, nil)
err = s.scheduler.UnSchedule(nil, 1)
s.NotNil(err)
s.dao.AssertExpectations(s.T())
s.execMgr.AssertExpectations(s.T())
// reset mocks
s.SetupTest()
// pass
s.dao.On("Get", mock.Anything, mock.Anything).Return(&schedule{
ID: 1,
CRON: "0 * * * * *",
ExecutionID: 1,
CallbackFuncName: "callback",
}, nil)
s.execMgr.On("Stop", mock.Anything, mock.Anything).Return(nil)
s.execMgr.On("Get", mock.Anything, mock.Anything).Return(&task.Execution{
ID: 1,
Status: job.StoppedStatus.String(),
}, nil)
s.dao.On("Delete", mock.Anything, mock.Anything).Return(nil)
s.execMgr.On("Delete", mock.Anything, mock.Anything).Return(nil)
err = s.scheduler.UnSchedule(nil, 1)
s.Nil(err)
s.dao.AssertExpectations(s.T())
s.execMgr.AssertExpectations(s.T())
}
func (s *schedulerTestSuite) TestGetSchedule() {
s.dao.On("Get", mock.Anything, mock.Anything).Return(&schedule{
ID: 1,
CRON: "0 * * * * *",
ExecutionID: 1,
}, nil)
s.execMgr.On("Get", mock.Anything, mock.Anything).Return(&task.Execution{
ID: 1,
Status: job.SuccessStatus.String(),
}, nil)
schedule, err := s.scheduler.GetSchedule(nil, 1)
s.Require().Nil(err)
s.Equal("0 * * * * *", schedule.CRON)
s.Equal(job.SuccessStatus.String(), schedule.Status)
s.dao.AssertExpectations(s.T())
s.execMgr.AssertExpectations(s.T())
}
func TestScheduler(t *testing.T) {

View File

@ -28,8 +28,8 @@ var (
// CheckInProcessor is the processor that handles the check-in data sent by the jobservice via webhook
type CheckInProcessor func(ctx context.Context, task *Task, change *job.StatusChange) (err error)
// Register check in processor for the specific vendor type
func Register(vendorType string, processor CheckInProcessor) error {
// RegisterCheckInProcessor registers check in processor for the specific vendor type
func RegisterCheckInProcessor(vendorType string, processor CheckInProcessor) error {
if _, exist := registry[vendorType]; exist {
return fmt.Errorf("check in processor for %s already exists", vendorType)
}
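As a hedged illustration of this extension point (the vendor type and the handler body are invented; only RegisterCheckInProcessor and the CheckInProcessor signature come from the code above, and the StatusChange.CheckIn field is assumed to carry the check-in payload):
func init() {
	if err := RegisterCheckInProcessor("HYPOTHETICAL_VENDOR", func(ctx context.Context, t *Task, change *job.StatusChange) error {
		// react to the check-in data reported by the running job
		log.Infof("task %d checked in: %s", t.ID, change.CheckIn)
		return nil
	}); err != nil {
		log.Errorf("failed to register the check-in processor: %v", err)
	}
}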

View File

@ -20,11 +20,11 @@ import (
"github.com/stretchr/testify/assert"
)
func TestRegister(t *testing.T) {
err := Register("test", nil)
func TestRegisterCheckInProcessor(t *testing.T) {
err := RegisterCheckInProcessor("test", nil)
assert.Nil(t, err)
// already exist
err = Register("test", nil)
err = RegisterCheckInProcessor("test", nil)
assert.NotNil(t, err)
}

View File

@ -122,7 +122,7 @@ func (e *executionDAO) Delete(ctx context.Context, id int64) error {
})
if err != nil {
if e := orm.AsForeignKeyError(err,
"the execution %d is referenced by other tasks", id); e != nil {
"the execution %d is referenced by other resources", id); e != nil {
err = e
}
return err

View File

@ -79,7 +79,8 @@ func (m *manager) Create(ctx context.Context, executionID int64, jb *Job, extraA
jobID, err := m.submitJob(ctx, id, jb)
if err != nil {
// failed to submit job to jobservice, update the status of task to error
log.Errorf("failed to submit job to jobservice: %v", err)
err = fmt.Errorf("failed to submit job to jobservice: %v", err)
log.Error(err)
now := time.Now()
err = m.dao.Update(ctx, &dao.Task{
ID: id,
@ -155,12 +156,9 @@ func (m *manager) Stop(ctx context.Context, id int64) error {
return err
}
// if the task is already in final status, return directly
if job.Status(task.Status).Final() {
log.Debugf("the task %d is in final status %s, skip", task.ID, task.Status)
return nil
}
// when a task is in a final status, a periodic or retrying job will still
// run again in the near future, so we must apply the stop action to these
// final-status jobs as well
if err = m.jsClient.PostAction(task.JobID, string(job.StopCommand)); err != nil {
// job not found, update its status to stopped directly
if err == cjob.ErrJobNotFound {

View File

@ -70,21 +70,7 @@ func (t *taskManagerTestSuite) TestCreate() {
}
func (t *taskManagerTestSuite) TestStop() {
// the task is in final status
t.dao.On("Get", mock.Anything, mock.Anything).Return(&dao.Task{
ID: 1,
ExecutionID: 1,
Status: job.SuccessStatus.String(),
}, nil)
err := t.mgr.Stop(nil, 1)
t.Require().Nil(err)
t.dao.AssertExpectations(t.T())
// reset mock
t.SetupTest()
// the task isn't in final status, job not found
// job not found
t.dao.On("Get", mock.Anything, mock.Anything).Return(&dao.Task{
ID: 1,
ExecutionID: 1,
@ -93,7 +79,7 @@ func (t *taskManagerTestSuite) TestStop() {
t.jsClient.On("PostAction", mock.Anything, mock.Anything).Return(cjob.ErrJobNotFound)
t.dao.On("Update", mock.Anything, mock.Anything,
mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil)
err = t.mgr.Stop(nil, 1)
err := t.mgr.Stop(nil, 1)
t.Require().Nil(err)
t.dao.AssertExpectations(t.T())
t.jsClient.AssertExpectations(t.T())
@ -101,7 +87,7 @@ func (t *taskManagerTestSuite) TestStop() {
// reset mock
t.SetupTest()
// the task isn't in final status
// pass
t.dao.On("Get", mock.Anything, mock.Anything).Return(&dao.Task{
ID: 1,
ExecutionID: 1,

View File

@ -374,24 +374,53 @@
[(ngModel)]="currentConfig.oidc_scope.value" id="oidcScope" size="40" required
[disabled]="disabled(currentConfig.oidc_scope)" pattern="^(\w+,){0,}openid(,\w+){0,}$" />
<clr-control-error>{{'TOOLTIP.SCOPE_REQUIRED' | translate}}</clr-control-error>
</clr-input-container>
<clr-checkbox-container>
<label for="oidc_verify_cert">{{'CONFIG.OIDC.OIDC_VERIFYCERT' | translate}}
<clr-tooltip>
<clr-icon clrTooltipTrigger shape="info-circle" size="24"></clr-icon>
<clr-tooltip-content clrPosition="top-right" clrSize="lg" *clrIfOpen>
<span>{{'TOOLTIP.OIDC_VERIFYCERT' | translate}}</span>
</clr-tooltip-content>
</clr-tooltip>
</label>
<clr-checkbox-wrapper>
<input type="checkbox" clrCheckbox name="oidc_verify_cert" id="oidc_verify_cert"
[disabled]="disabled(currentConfig.oidc_verify_cert)"
[(ngModel)]="currentConfig.oidc_verify_cert.value" />
</clr-checkbox-wrapper>
</clr-checkbox-container>
<clr-checkbox-container>
<label for="oidcAutoOnboard">{{'CONFIG.OIDC.OIDC_AUTOONBOARD' | translate}}
<clr-tooltip>
<clr-icon clrTooltipTrigger shape="info-circle" size="24"></clr-icon>
<clr-tooltip-content clrPosition="top-right" clrSize="lg" *clrIfOpen>
<span>{{'TOOLTIP.OIDC_AUTOONBOARD' | translate}}</span>
</clr-tooltip-content>
</clr-tooltip>
</label>
<clr-checkbox-wrapper>
<input type="checkbox" clrCheckbox name="oidcAutoOnboard" id="oidcAutoOnboard"
[disabled]="disabled(currentConfig.oidc_auto_onboard)"
[(ngModel)]="currentConfig.oidc_auto_onboard.value" />
</clr-checkbox-wrapper>
</clr-checkbox-container>
<clr-input-container>
<label for="oidcUserClaim">{{'CONFIG.OIDC.USER_CLAIM' | translate}}
<clr-tooltip>
<clr-icon clrTooltipTrigger shape="info-circle" size="24"></clr-icon>
<clr-tooltip-content clrPosition="top-right" clrSize="lg" *clrIfOpen>
<span>{{'TOOLTIP.OIDC_USER_CLAIM' | translate}}</span>
</clr-tooltip-content>
</clr-tooltip>
</label>
<input clrInput name="oidcUserClaim" type="text" #oidcUserClaimInput="ngModel"
[(ngModel)]="currentConfig.oidc_user_claim.value" id="oidcUserClaim" size="40"
[disabled]="disabled(currentConfig.oidc_user_claim)" pattern="^[a-zA-Z0-9_-]*$">
</clr-input-container>
<div class="oidc-tip">{{ 'CONFIG.OIDC.OIDC_REDIREC_URL' | translate}}
<span>{{redirectUrl}}/c/oidc/callback</span>
</div>
</section>
</form>
<div>

View File

@ -102,6 +102,8 @@
"OIDC_VERIFYCERT": "Uncheck this box if your OIDC server is hosted via self-signed certificate.",
"OIDC_GROUP_CLAIM": "The name of Claim in the ID token whose value is the list of group names.",
"OIDC_GROUP_CLAIM_WARNING": "It can only contain letters, numbers, underscores, and the input length is no more than 256 characters.",
"OIDC_AUTOONBOARD": "Skip the onboarding screen, so user cannot change its username. Username is provided from ID Token",
"OIDC_USER_CLAIM": "The name of the claim in the ID Token where the username is retrieved from. If not specified, it will default to 'name'",
"NEW_SECRET": "The secret must longer than 8 chars with at least 1 uppercase letter, 1 lowercase letter and 1 number"
},
"PLACEHOLDER": {
@ -911,6 +913,8 @@
"CLIENTSECRET": "OIDC Client Secret",
"SCOPE": "OIDC Scope",
"OIDC_VERIFYCERT": "Verify Certificate",
"OIDC_AUTOONBOARD": "Automatic onboarding",
"USER_CLAIM": "Username Claim",
"OIDC_SETNAME": "Set OIDC Username",
"OIDC_SETNAMECONTENT": "You must create a Harbor username the first time when authenticating via a third party(OIDC).This will be used within Harbor to be associated with projects, roles, etc.",
"OIDC_USERNAME": "Username",

View File

@ -98,7 +98,9 @@ export class Configuration {
oidc_client_id?: StringValueItem;
oidc_client_secret?: StringValueItem;
oidc_verify_cert?: BoolValueItem;
oidc_auto_onboard?: BoolValueItem;
oidc_scope?: StringValueItem;
oidc_user_claim?: StringValueItem;
count_per_project: NumberValueItem;
storage_per_project: NumberValueItem;
cfg_expiration: NumberValueItem;
@ -155,8 +157,10 @@ export class Configuration {
this.oidc_client_id = new StringValueItem('', true);
this.oidc_client_secret = new StringValueItem('', true);
this.oidc_verify_cert = new BoolValueItem(false, true);
this.oidc_auto_onboard = new BoolValueItem(false, true);
this.oidc_scope = new StringValueItem('', true);
this.oidc_groups_claim = new StringValueItem('', true);
this.oidc_user_claim = new StringValueItem('', true);
this.count_per_project = new NumberValueItem(-1, true);
this.storage_per_project = new NumberValueItem(-1, true);
}

View File

@ -59,11 +59,23 @@ type adapter struct {
// Info gets info about Huawei SWR
func (a *adapter) Info() (*model.RegistryInfo, error) {
registryInfo := model.RegistryInfo{
Type: model.RegistryTypeHuawei,
Description: "Adapter for SWR -- The image registry of Huawei Cloud",
SupportedResourceTypes: []model.ResourceType{model.ResourceTypeImage},
SupportedResourceFilters: []*model.FilterStyle{},
SupportedTriggers: []model.TriggerType{},
Type: model.RegistryTypeHuawei,
Description: "Adapter for SWR -- The image registry of Huawei Cloud",
SupportedResourceTypes: []model.ResourceType{model.ResourceTypeImage},
SupportedResourceFilters: []*model.FilterStyle{
{
Type: model.FilterTypeName,
Style: model.FilterStyleTypeText,
},
{
Type: model.FilterTypeTag,
Style: model.FilterStyleTypeText,
},
},
SupportedTriggers: []model.TriggerType{
model.TriggerTypeManual,
model.TriggerTypeScheduled,
},
}
return &registryInfo, nil
}

View File

@ -20,7 +20,9 @@ import (
"strconv"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/lib/errors"
libhttp "github.com/goharbor/harbor/src/lib/http"
"github.com/goharbor/harbor/src/lib/log"
"github.com/goharbor/harbor/src/pkg/task"
"github.com/goharbor/harbor/src/server/router"
)
@ -52,6 +54,11 @@ func (j *jobStatusHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
if err = j.handler.Handle(r.Context(), taskID, sc); err != nil {
// ignore the not found error to avoid the jobservice re-sending the hook
if errors.IsNotFoundErr(err) {
log.Warningf("got the status change hook for a non existing task %d", taskID)
return
}
libhttp.SendError(w, err)
return
}

View File

@ -11,6 +11,7 @@ import (
"github.com/goharbor/harbor/src/server/middleware"
"github.com/goharbor/harbor/src/server/middleware/requestid"
"net/http"
"time"
)
// HeadBlobMiddleware intercept the head blob request
@ -40,12 +41,21 @@ func handleHead(req *http.Request) error {
switch bb.Status {
case blob_models.StatusNone, blob_models.StatusDelete:
if err := blob.Ctl.Touch(req.Context(), bb); err != nil {
log.Errorf("failed to update blob: %s status to StatusNone, error:%v", blobInfo.Digest, err)
return errors.Wrapf(err, fmt.Sprintf("the request id is: %s", req.Header.Get(requestid.HeaderXRequestID)))
}
case blob_models.StatusDeleting:
now := time.Now().UTC()
// if the deletion has lasted more than 2 hours, mark the blob as StatusDeleteFailed and return a 404, so the client can push it again
if now.Sub(bb.UpdateTime) > time.Duration(BlobDeleteingTimeWindow)*time.Hour {
if err := blob.Ctl.Fail(req.Context(), bb); err != nil {
log.Errorf("failed to update blob: %s status to StatusDeleteFailed, error:%v", blobInfo.Digest, err)
return errors.Wrapf(err, fmt.Sprintf("the request id is: %s", req.Header.Get(requestid.HeaderXRequestID)))
}
}
return errors.New(nil).WithMessage(fmt.Sprintf("the asking blob is delete failed, mark it as non existing, request id: %s", req.Header.Get(requestid.HeaderXRequestID))).WithCode(errors.NotFoundCode)
case blob_models.StatusDeleteFailed:
return errors.New(nil).WithMessage(fmt.Sprintf("the asking blob is in GC, mark it as non existing, request id: %s", req.Header.Get(requestid.HeaderXRequestID))).WithCode(errors.NotFoundCode)
default:
return errors.New(nil).WithMessage(fmt.Sprintf("wrong blob status, %s", bb.Status))

View File

@ -2,13 +2,18 @@ package blob
import (
"fmt"
"github.com/goharbor/harbor/src/controller/blob"
"github.com/goharbor/harbor/src/lib/errors"
"github.com/goharbor/harbor/src/lib/log"
"github.com/goharbor/harbor/src/pkg/blob/models"
"github.com/goharbor/harbor/src/server/middleware/requestid"
"net/http"
"time"
)
// BlobDeleteingTimeWindow is the time window used in GC to reserve blobs
const BlobDeleteingTimeWindow = 2
// probeBlob handles the config/layer and manifest status in the PUT blob & manifest middleware, and updates the status before the request is passed on to the proxy (distribution).
func probeBlob(r *http.Request, digest string) error {
logger := log.G(r.Context())
@ -24,14 +29,22 @@ func probeBlob(r *http.Request, digest string) error {
switch bb.Status {
case models.StatusNone, models.StatusDelete, models.StatusDeleteFailed:
if err := blobController.Touch(r.Context(), bb); err != nil {
logger.Errorf("failed to update blob: %s status to StatusNone, error:%v", bb.Digest, err)
return errors.Wrapf(err, fmt.Sprintf("the request id is: %s", r.Header.Get(requestid.HeaderXRequestID)))
}
case models.StatusDeleting:
now := time.Now().UTC()
// if the deletion has lasted more than 2 hours, mark the blob as StatusDeleteFailed
if now.Sub(bb.UpdateTime) > time.Duration(BlobDeleteingTimeWindow)*time.Hour {
if err := blob.Ctl.Fail(r.Context(), bb); err != nil {
log.Errorf("failed to update blob: %s status to StatusDeleteFailed, error:%v", bb.Digest, err)
return errors.Wrapf(err, fmt.Sprintf("the request id is: %s", r.Header.Get(requestid.HeaderXRequestID)))
}
// StatusDeleteFailed => StatusNone, and then let the proxy handle the manifest upload
return probeBlob(r, digest)
}
return errors.New(nil).WithMessage(fmt.Sprintf("the asking blob is delete failed, mark it as non existing, request id: %s", r.Header.Get(requestid.HeaderXRequestID))).WithCode(errors.NotFoundCode)
default:
return nil
}

View File

@ -73,7 +73,11 @@ func handleBlob(w http.ResponseWriter, r *http.Request, next http.Handler) error
}
func preCheck(ctx context.Context) (art lib.ArtifactInfo, p *models.Project, ctl proxy.Controller, err error) {
none := lib.ArtifactInfo{}
art = lib.GetArtifactInfo(ctx)
if art == none {
return none, nil, nil, errors.New("artifactinfo is not found").WithCode(errors.NotFoundCode)
}
ctl = proxy.ControllerInstance()
p, err = project.Ctl.GetByName(ctx, art.ProjectName, project.Metadata(false))
return

View File

@ -18,6 +18,7 @@ import (
"context"
"fmt"
"github.com/goharbor/harbor/src/controller/event/metadata"
"github.com/goharbor/harbor/src/controller/project"
"github.com/goharbor/harbor/src/pkg/notification"
"net/http"
"strings"
@ -48,6 +49,7 @@ const (
func newArtifactAPI() *artifactAPI {
return &artifactAPI{
artCtl: artifact.Ctl,
proCtl: project.Ctl,
repoCtl: repository.Ctl,
scanCtl: scan.DefaultController,
tagCtl: tag.Ctl,
@ -57,6 +59,7 @@ func newArtifactAPI() *artifactAPI {
type artifactAPI struct {
BaseAPI
artCtl artifact.Controller
proCtl project.Controller
repoCtl repository.Controller
scanCtl scan.Controller
tagCtl tag.Controller
@ -152,6 +155,15 @@ func (a *artifactAPI) CopyArtifact(ctx context.Context, params operation.CopyArt
return a.SendError(ctx, err)
}
pro, err := a.proCtl.GetByName(ctx, params.ProjectName)
if err != nil {
return a.SendError(ctx, err)
}
if pro.RegistryID > 0 {
return a.SendError(ctx, errors.New(nil).WithCode(errors.MethodNotAllowedCode).
WithMessage("cannot copy the artifact to a proxy cache project"))
}
srcRepo, ref, err := parse(params.From)
if err != nil {
return a.SendError(ctx, err)

View File

@ -145,6 +145,20 @@ func (_m *Controller) Exist(ctx context.Context, digest string, options ...blob.
return r0, r1
}
// Fail provides a mock function with given fields: ctx, _a1
func (_m *Controller) Fail(ctx context.Context, _a1 *models.Blob) error {
ret := _m.Called(ctx, _a1)
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, *models.Blob) error); ok {
r0 = rf(ctx, _a1)
} else {
r0 = ret.Error(0)
}
return r0
}
// FindMissingAssociationsForProject provides a mock function with given fields: ctx, projectID, blobs
func (_m *Controller) FindMissingAssociationsForProject(ctx context.Context, projectID int64, blobs []*models.Blob) ([]*models.Blob, error) {
ret := _m.Called(ctx, projectID, blobs)

View File

@ -221,13 +221,13 @@ func (_m *Manager) UpdateBlobStatus(ctx context.Context, _a1 *models.Blob) (int6
return r0, r1
}
// UselessBlobs provides a mock function with given fields: ctx, timeWindow
func (_m *Manager) UselessBlobs(ctx context.Context, timeWindow int64) ([]*models.Blob, error) {
ret := _m.Called(ctx, timeWindow)
// UselessBlobs provides a mock function with given fields: ctx, timeWindowHours
func (_m *Manager) UselessBlobs(ctx context.Context, timeWindowHours int64) ([]*models.Blob, error) {
ret := _m.Called(ctx, timeWindowHours)
var r0 []*models.Blob
if rf, ok := ret.Get(0).(func(context.Context, int64) []*models.Blob); ok {
r0 = rf(ctx, timeWindow)
r0 = rf(ctx, timeWindowHours)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]*models.Blob)
@ -236,7 +236,7 @@ func (_m *Manager) UselessBlobs(ctx context.Context, timeWindow int64) ([]*model
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, int64) error); ok {
r1 = rf(ctx, timeWindow)
r1 = rf(ctx, timeWindowHours)
} else {
r1 = ret.Error(1)
}

View File

@ -0,0 +1,17 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package scheduler
//go:generate mockery -dir ../../../pkg/scheduler -name Scheduler -output . -outpkg scheduler

View File

@ -1,77 +1,73 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by mockery v1.1.2. DO NOT EDIT.
package scheduler
import (
	context "context"

	scheduler "github.com/goharbor/harbor/src/pkg/scheduler"
	mock "github.com/stretchr/testify/mock"
)
// Scheduler is an autogenerated mock type for the Scheduler type
type Scheduler struct {
	mock.Mock
}
// GetSchedule provides a mock function with given fields: ctx, id
func (_m *Scheduler) GetSchedule(ctx context.Context, id int64) (*scheduler.Schedule, error) {
	ret := _m.Called(ctx, id)
	var r0 *scheduler.Schedule
	if rf, ok := ret.Get(0).(func(context.Context, int64) *scheduler.Schedule); ok {
		r0 = rf(ctx, id)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*scheduler.Schedule)
		}
	}
	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, int64) error); ok {
		r1 = rf(ctx, id)
	} else {
		r1 = ret.Error(1)
	}
	return r0, r1
}
// Schedule provides a mock function with given fields: ctx, cron, callbackFuncName, params
func (_m *Scheduler) Schedule(ctx context.Context, cron string, callbackFuncName string, params interface{}) (int64, error) {
	ret := _m.Called(ctx, cron, callbackFuncName, params)
	var r0 int64
	if rf, ok := ret.Get(0).(func(context.Context, string, string, interface{}) int64); ok {
		r0 = rf(ctx, cron, callbackFuncName, params)
	} else {
		r0 = ret.Get(0).(int64)
	}
	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, string, string, interface{}) error); ok {
		r1 = rf(ctx, cron, callbackFuncName, params)
	} else {
		r1 = ret.Error(1)
	}
	return r0, r1
}
// UnSchedule provides a mock function with given fields: ctx, id
func (_m *Scheduler) UnSchedule(ctx context.Context, id int64) error {
	ret := _m.Called(ctx, id)
	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, int64) error); ok {
		r0 = rf(ctx, id)
	} else {
		r0 = ret.Error(0)
	}
	return r0
}
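A quick illustrative sketch (a hypothetical test, assuming the standard "testing" and "context" packages are imported alongside the mock package) of wiring this generated mock into a test:
func TestWithSchedulerMock(t *testing.T) {
	m := &Scheduler{}
	// expect one Schedule call and stub its return values
	m.On("Schedule", mock.Anything, "0 * * * * *", "callback", nil).Return(int64(1), nil)
	id, err := m.Schedule(context.TODO(), "0 * * * * *", "callback", nil)
	if err != nil || id != 1 {
		t.Fatalf("unexpected result: id=%d, err=%v", id, err)
	}
	m.AssertExpectations(t)
}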

src/vendor/github.com/FZambia/sentinel/.gitignore generated vendored Normal file
View File

@ -0,0 +1,24 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof

View File

@ -1,4 +1,3 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
@ -173,3 +172,30 @@
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

src/vendor/github.com/FZambia/sentinel/README.md generated vendored Normal file
View File

@ -0,0 +1,39 @@
go-sentinel
===========
Redis Sentinel support for [redigo](https://github.com/gomodule/redigo) library.
Documentation
-------------
- [API Reference](http://godoc.org/github.com/FZambia/sentinel)
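
For context, a minimal usage sketch following the library's documented pattern (the Sentinel address `:26379` and the master name `mymaster` are placeholders):

```go
sntnl := &sentinel.Sentinel{
	Addrs:      []string{":26379"},
	MasterName: "mymaster",
	Dial: func(addr string) (redis.Conn, error) {
		timeout := 500 * time.Millisecond
		return redis.DialTimeout("tcp", addr, timeout, timeout, timeout)
	},
}
pool := &redis.Pool{
	MaxIdle: 3,
	Dial: func() (redis.Conn, error) {
		// always dial the current master as reported by Sentinel
		masterAddr, err := sntnl.MasterAddr()
		if err != nil {
			return nil, err
		}
		return redis.Dial("tcp", masterAddr)
	},
}
```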
Alternative solution
--------------------
You can alternatively configure HAProxy between your application and Redis to proxy requests to the Redis master instance if you only need HA:
```
listen redis
server redis-01 127.0.0.1:6380 check port 6380 check inter 2s weight 1 inter 2s downinter 5s rise 10 fall 2 on-marked-down shutdown-sessions on-marked-up shutdown-backup-sessions
server redis-02 127.0.0.1:6381 check port 6381 check inter 2s weight 1 inter 2s downinter 5s rise 10 fall 2 backup
bind *:6379
mode tcp
option tcpka
option tcplog
option tcp-check
tcp-check send PING\r\n
tcp-check expect string +PONG
tcp-check send info\ replication\r\n
tcp-check expect string role:master
tcp-check send QUIT\r\n
tcp-check expect string +OK
balance roundrobin
```
This way you don't need to use this library.
License
-------
Library is available under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.html).

Some files were not shown because too many files have changed in this diff.