Mirror of https://github.com/goharbor/harbor.git (synced 2024-12-22 08:38:03 +01:00)
Apply consistent format for comments
Signed-off-by: 陈德 <chende@caicloud.io>
Parent: 4d601292d1
Commit: 0582db9a82
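The change below is purely cosmetic: each line comment and doc comment gains a single space after the // marker (for example, //init database becomes // init database), matching the conventional Go comment style used elsewhere in the codebase. A minimal sketch of the before/after convention (illustrative only; the package and identifiers here are invented and are not part of the commit):

// Package example illustrates the comment style applied throughout this commit.
package example

//oldStyle shows the form being removed: no space after the comment marker.
func oldStyle() {}

// newStyle shows the form being introduced: a single space after the comment marker.
func newStyle() {}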
@ -256,13 +256,13 @@ func parseStringToBool(str string) (interface{}, error) {
// Init system configurations. If env RESET is set or configurations
// read from storage driver is null, load all configurations from env
func Init() (err error) {
//init database
// init database
envCfgs := map[string]interface{}{}
if err := LoadFromEnv(envCfgs, true); err != nil {
return err
}
db := GetDatabaseFromCfg(envCfgs)
//Initialize the schema, then register the DB.
// Initialize the schema, then register the DB.
if err := dao.UpgradeSchema(db); err != nil {
return err
}
@ -273,7 +273,7 @@ func Init() (err error) {
return err
}
//Use reload key to avoid reset customed setting after restart
// Use reload key to avoid reset customed setting after restart
curCfgs, err := CfgStore.Read()
if err != nil {
return err
@ -282,8 +282,8 @@ func Init() (err error) {
if curCfgs == nil {
curCfgs = map[string]interface{}{}
}
//restart: only repeatload envs will be load
//reload_config: all envs will be reload except the skiped envs
// restart: only repeatload envs will be load
// reload_config: all envs will be reload except the skiped envs
if err = LoadFromEnv(curCfgs, loadAll); err != nil {
return err
}
@ -312,7 +312,7 @@ func initCfgStore() (err error) {
if err != nil {
return err
}
//migration check: if no data in the db , then will try to load from path
// migration check: if no data in the db , then will try to load from path
m, err := CfgStore.Read()
if err != nil {
return err
@ -453,7 +453,7 @@ func validLdapScope(cfg map[string]interface{}, isMigrate bool) {
}
//AddMissedKey ... If the configure key is missing in the cfg map, add default value to it
// AddMissedKey ... If the configure key is missing in the cfg map, add default value to it
func AddMissedKey(cfg map[string]interface{}) {
for k, v := range common.HarborStringKeysMap {
@ -199,7 +199,7 @@ func TestLoadFromEnvWithReloadConfigSkipPattern(t *testing.T) {
if err != nil {
t.Fatalf("failed to load From env: %v", err)
}
assert.Equal(t, "ldap_url", cfgsReload[common.LDAPURL]) //env value ignored
assert.Equal(t, "ldap_url", cfgsReload[common.LDAPURL]) // env value ignored
os.Clearenv()
@ -4,14 +4,14 @@ import (
"net/http"
)
//BaseHandler defines the handlers related with the chart server itself.
// BaseHandler defines the handlers related with the chart server itself.
type BaseHandler struct {
//Proxy used to to transfer the traffic of requests
//It's mainly used to talk to the backend chart server
// Proxy used to to transfer the traffic of requests
// It's mainly used to talk to the backend chart server
trafficProxy *ProxyEngine
}
//GetHealthStatus will return the health status of the backend chart repository server
// GetHealthStatus will return the health status of the backend chart repository server
func (bh *BaseHandler) GetHealthStatus(w http.ResponseWriter, req *http.Request) {
bh.trafficProxy.ServeHTTP(w, req)
}
File diff suppressed because one or more lines are too long
@ -8,50 +8,50 @@ import (
beego_cache "github.com/astaxie/beego/cache"
hlog "github.com/goharbor/harbor/src/common/utils/log"
//Enable redis cache adaptor
// Enable redis cache adaptor
_ "github.com/astaxie/beego/cache/redis"
)
const (
standardExpireTime = 3600 * time.Second
redisENVKey = "_REDIS_URL"
cacheDriverENVKey = "CHART_CACHE_DRIVER" //"memory" or "redis"
cacheDriverENVKey = "CHART_CACHE_DRIVER" // "memory" or "redis"
cacheDriverMem = "memory"
cacheDriverRedis = "redis"
cacheCollectionName = "helm_chart_cache"
)
//ChartCache is designed to cache some processed data for repeated accessing
//to improve the performance
// ChartCache is designed to cache some processed data for repeated accessing
// to improve the performance
type ChartCache struct {
//Cache driver
// Cache driver
cache beego_cache.Cache
//Keep the driver type
// Keep the driver type
driverType string
//To indicate if the chart cache is enabled
// To indicate if the chart cache is enabled
isEnabled bool
}
//ChartCacheConfig keeps the configurations of ChartCache
// ChartCacheConfig keeps the configurations of ChartCache
type ChartCacheConfig struct {
//Only support 'in-memory' and 'redis' now
// Only support 'in-memory' and 'redis' now
DriverType string
//Align with config
// Align with config
Config string
}
//NewChartCache is constructor of ChartCache
//If return nil, that means no cache is enabled for chart repository server
// NewChartCache is constructor of ChartCache
// If return nil, that means no cache is enabled for chart repository server
func NewChartCache(config *ChartCacheConfig) *ChartCache {
//Never return nil object
// Never return nil object
chartCache := &ChartCache{
isEnabled: false,
}
//Double check the configurations are what we want
// Double check the configurations are what we want
if config == nil {
return chartCache
}
@ -66,13 +66,13 @@ func NewChartCache(config *ChartCacheConfig) *ChartCache {
}
}
//Try to create the upstream cache
// Try to create the upstream cache
cache := initCacheDriver(config)
if cache == nil {
return chartCache
}
//Cache enabled
// Cache enabled
chartCache.isEnabled = true
chartCache.driverType = config.DriverType
chartCache.cache = cache
@ -80,74 +80,74 @@ func NewChartCache(config *ChartCacheConfig) *ChartCache {
return chartCache
}
//IsEnabled to indicate if the chart cache is successfully enabled
//The cache may be disabled if
// IsEnabled to indicate if the chart cache is successfully enabled
// The cache may be disabled if
// user does not set
// wrong configurations
func (chc *ChartCache) IsEnabled() bool {
return chc.isEnabled
}
//PutChart caches the detailed data of chart version
// PutChart caches the detailed data of chart version
func (chc *ChartCache) PutChart(chart *ChartVersionDetails) {
//If cache is not enabled, do nothing
// If cache is not enabled, do nothing
if !chc.IsEnabled() {
return
}
//As it's a valid json data anymore when retrieving back from redis cache,
//here we use separate methods to handle the data according to the driver type
// As it's a valid json data anymore when retrieving back from redis cache,
// here we use separate methods to handle the data according to the driver type
if chart != nil {
var err error
switch chc.driverType {
case cacheDriverMem:
//Directly put object in
// Directly put object in
err = chc.cache.Put(chart.Metadata.Digest, chart, standardExpireTime)
case cacheDriverRedis:
//Marshal to json data before saving
// Marshal to json data before saving
var jsonData []byte
if jsonData, err = json.Marshal(chart); err == nil {
err = chc.cache.Put(chart.Metadata.Digest, jsonData, standardExpireTime)
}
default:
//Should not reach here, but still put guard code here
// Should not reach here, but still put guard code here
err = errors.New("Meet invalid cache driver")
}
if err != nil {
//Just logged
// Just logged
hlog.Errorf("Failed to cache chart object with error: %s\n", err)
hlog.Warningf("If cache driver is using 'redis', please check the related configurations or the network connection")
}
}
}
//GetChart trys to retrieve it from the cache
//If hit, return the cached item;
//otherwise, nil object is returned
// GetChart trys to retrieve it from the cache
// If hit, return the cached item;
// otherwise, nil object is returned
func (chc *ChartCache) GetChart(chartDigest string) *ChartVersionDetails {
//If cache is not enabled, do nothing
// If cache is not enabled, do nothing
if !chc.IsEnabled() {
return nil
}
object := chc.cache.Get(chartDigest)
if object != nil {
//Try to convert data
//First try the normal way
// Try to convert data
// First try the normal way
if chartDetails, ok := object.(*ChartVersionDetails); ok {
return chartDetails
}
//Maybe json bytes
// Maybe json bytes
if bytes, yes := object.([]byte); yes {
chartDetails := &ChartVersionDetails{}
err := json.Unmarshal(bytes, chartDetails)
if err == nil {
return chartDetails
}
//Just logged the error
// Just logged the error
hlog.Errorf("Failed to retrieve chart from cache with error: %s", err)
}
}
@ -155,7 +155,7 @@ func (chc *ChartCache) GetChart(chartDigest string) *ChartVersionDetails {
return nil
}
//Initialize the cache driver based on the config
// Initialize the cache driver based on the config
func initCacheDriver(cacheConfig *ChartCacheConfig) beego_cache.Cache {
switch cacheConfig.DriverType {
case cacheDriverMem:
@ -164,7 +164,7 @@ func initCacheDriver(cacheConfig *ChartCacheConfig) beego_cache.Cache {
case cacheDriverRedis:
redisCache, err := beego_cache.NewCache(cacheDriverRedis, cacheConfig.Config)
if err != nil {
//Just logged
// Just logged
hlog.Errorf("Failed to initialize redis cache: %s", err)
return nil
}
@ -175,7 +175,7 @@ func initCacheDriver(cacheConfig *ChartCacheConfig) beego_cache.Cache {
break
}
//Any other cases
// Any other cases
hlog.Info("No cache is enabled for chart caching")
return nil
}
@ -22,7 +22,7 @@ var (
}
)
//Test the no cache set scenario
// Test the no cache set scenario
func TestNoCache(t *testing.T) {
chartCache := NewChartCache(nil)
if chartCache == nil {
@ -34,7 +34,7 @@ func TestNoCache(t *testing.T) {
}
}
//Test the in memory cache
// Test the in memory cache
func TestInMemoryCache(t *testing.T) {
chartCache := NewChartCache(&ChartCacheConfig{
DriverType: cacheDriverMem,
@ -58,8 +58,8 @@ func TestInMemoryCache(t *testing.T) {
}
}
//Test redis cache
//Failed to config redis cache and then use in memory instead
// Test redis cache
// Failed to config redis cache and then use in memory instead
func TestRedisCache(t *testing.T) {
redisConfigV := make(map[string]string)
redisConfigV["key"] = cacheCollectionName
@ -21,7 +21,7 @@ const (
valuesFileName = "values.yaml"
)
//ChartVersionDetails keeps the detailed data info of the chart version
// ChartVersionDetails keeps the detailed data info of the chart version
type ChartVersionDetails struct {
Metadata *helm_repo.ChartVersion `json:"metadata"`
Dependencies []*chartutil.Dependency `json:"dependencies"`
@ -30,19 +30,19 @@ type ChartVersionDetails struct {
Security *SecurityReport `json:"security"`
}
//SecurityReport keeps the info related with security
//e.g.: digital signature, vulnerability scanning etc.
// SecurityReport keeps the info related with security
// e.g.: digital signature, vulnerability scanning etc.
type SecurityReport struct {
Signature *DigitalSignature `json:"signature"`
}
//DigitalSignature used to indicate if the chart has been signed
// DigitalSignature used to indicate if the chart has been signed
type DigitalSignature struct {
Signed bool `json:"signed"`
Provenance string `json:"prov_file"`
}
//ChartInfo keeps the information of the chart
// ChartInfo keeps the information of the chart
type ChartInfo struct {
Name string
TotalVersions uint32 `json:"total_versions"`
@ -54,27 +54,27 @@ type ChartInfo struct {
Deprecated bool
}
//ChartOperator is designed to process the contents of
//the specified chart version to get more details
// ChartOperator is designed to process the contents of
// the specified chart version to get more details
type ChartOperator struct{}
//GetChartDetails parse the details from the provided content bytes
// GetChartDetails parse the details from the provided content bytes
func (cho *ChartOperator) GetChartDetails(content []byte) (*ChartVersionDetails, error) {
if content == nil || len(content) == 0 {
return nil, errors.New("zero content")
}
//Load chart from in-memory content
// Load chart from in-memory content
reader := bytes.NewReader(content)
chartData, err := chartutil.LoadArchive(reader)
if err != nil {
return nil, err
}
//Parse the requirements of chart
// Parse the requirements of chart
requirements, err := chartutil.LoadRequirements(chartData)
if err != nil {
//If no requirements.yaml, return empty dependency list
// If no requirements.yaml, return empty dependency list
if _, ok := err.(chartutil.ErrNoRequirementsFile); ok {
requirements = &chartutil.Requirements{
Dependencies: make([]*chartutil.Dependency, 0),
@ -86,16 +86,16 @@ func (cho *ChartOperator) GetChartDetails(content []byte) (*ChartVersionDetails,
var values map[string]interface{}
files := make(map[string]string)
//Parse values
// Parse values
if chartData.Values != nil {
values = parseRawValues([]byte(chartData.Values.GetRaw()))
if len(values) > 0 {
//Append values.yaml file
// Append values.yaml file
files[valuesFileName] = chartData.Values.Raw
}
}
//Append other files like 'README.md'
// Append other files like 'README.md'
for _, v := range chartData.GetFiles() {
if v.TypeUrl == readmeFileName {
files[readmeFileName] = string(v.GetValue())
@ -112,7 +112,7 @@ func (cho *ChartOperator) GetChartDetails(content []byte) (*ChartVersionDetails,
return theChart, nil
}
//GetChartList returns a reorganized chart list
// GetChartList returns a reorganized chart list
func (cho *ChartOperator) GetChartList(content []byte) ([]*ChartInfo, error) {
if content == nil || len(content) == 0 {
return nil, errors.New("zero content")
@ -140,8 +140,8 @@ func (cho *ChartOperator) GetChartList(content []byte) ([]*ChartInfo, error) {
}
}
//Sort the chart list by the updated time which is the create time
//of the latest version of the chart.
// Sort the chart list by the updated time which is the create time
// of the latest version of the chart.
sort.Slice(chartList, func(i, j int) bool {
if chartList[i].Updated.Equal(chartList[j].Updated) {
return strings.Compare(chartList[i].Name, chartList[j].Name) < 0
@ -153,7 +153,7 @@ func (cho *ChartOperator) GetChartList(content []byte) ([]*ChartInfo, error) {
return chartList, nil
}
//GetChartVersions returns the chart versions
// GetChartVersions returns the chart versions
func (cho *ChartOperator) GetChartVersions(content []byte) (helm_repo.ChartVersions, error) {
if content == nil || len(content) == 0 {
return nil, errors.New("zero content")
@ -167,7 +167,7 @@ func (cho *ChartOperator) GetChartVersions(content []byte) (helm_repo.ChartVersi
return chartVersions, nil
}
//Get the latest and oldest chart versions
// Get the latest and oldest chart versions
func getTheTwoCharts(chartVersions helm_repo.ChartVersions) (latestChart *helm_repo.ChartVersion, oldestChart *helm_repo.ChartVersion) {
if len(chartVersions) == 1 {
return chartVersions[0], chartVersions[0]
@ -176,18 +176,18 @@ func getTheTwoCharts(chartVersions helm_repo.ChartVersions) (latestChart *helm_r
for _, chartVersion := range chartVersions {
currentV, err := semver.NewVersion(chartVersion.Version)
if err != nil {
//ignore it, just logged
// ignore it, just logged
hlog.Warningf("Malformed semversion %s for the chart %s", chartVersion.Version, chartVersion.Name)
continue
}
//Find latest chart
// Find latest chart
if latestChart == nil {
latestChart = chartVersion
} else {
lVersion, err := semver.NewVersion(latestChart.Version)
if err != nil {
//ignore it, just logged
// ignore it, just logged
hlog.Warningf("Malformed semversion %s for the chart %s", latestChart.Version, chartVersion.Name)
continue
}
@ -208,7 +208,7 @@ func getTheTwoCharts(chartVersions helm_repo.ChartVersions) (latestChart *helm_r
return latestChart, oldestChart
}
//Parse the raw values to value map
// Parse the raw values to value map
func parseRawValues(rawValue []byte) map[string]interface{} {
valueMap := make(map[string]interface{})
@ -226,7 +226,7 @@ func parseRawValues(rawValue []byte) map[string]interface{} {
return valueMap
}
//Recursively read value
// Recursively read value
func readValue(values map[string]interface{}, keyPrefix string, valueMap map[string]interface{}) {
for key, value := range values {
longKey := key
@ -17,18 +17,18 @@ const (
idleConnectionTimeout = 30 * time.Second
)
//ChartClient is a http client to get the content from the external http server
// ChartClient is a http client to get the content from the external http server
type ChartClient struct {
//HTTP client
// HTTP client
httpClient *http.Client
//Auth info
// Auth info
credentail *Credential
}
//NewChartClient is constructor of ChartClient
//credentail can be nil
func NewChartClient(credentail *Credential) *ChartClient { //Create http client with customized timeouts
// NewChartClient is constructor of ChartClient
// credentail can be nil
func NewChartClient(credentail *Credential) *ChartClient { // Create http client with customized timeouts
client := &http.Client{
Timeout: clientTimeout,
Transport: &http.Transport{
@ -43,7 +43,7 @@ func NewChartClient(credentail *Credential) *ChartClient { //Create http client
}
}
//GetContent get the bytes from the specified url
// GetContent get the bytes from the specified url
func (cc *ChartClient) GetContent(addr string) ([]byte, error) {
response, err := cc.sendRequest(addr, http.MethodGet, nil, []int{http.StatusOK})
if err != nil {
@ -59,13 +59,13 @@ func (cc *ChartClient) GetContent(addr string) ([]byte, error) {
return content, nil
}
//DeleteContent sends deleting request to the addr to delete content
// DeleteContent sends deleting request to the addr to delete content
func (cc *ChartClient) DeleteContent(addr string) error {
_, err := cc.sendRequest(addr, http.MethodDelete, nil, []int{http.StatusOK})
return err
}
//sendRequest sends requests to the addr with the specified spec
// sendRequest sends requests to the addr with the specified spec
func (cc *ChartClient) sendRequest(addr string, method string, body io.Reader, expectedCodes []int) (*http.Response, error) {
if len(strings.TrimSpace(addr)) == 0 {
return nil, errors.New("empty url is not allowed")
@ -81,7 +81,7 @@ func (cc *ChartClient) sendRequest(addr string, method string, body io.Reader, e
return nil, err
}
//Set basic auth
// Set basic auth
if cc.credentail != nil {
request.SetBasicAuth(cc.credentail.Username, cc.credentail.Password)
}
@ -15,58 +15,58 @@ const (
passwordKey = "UI_SECRET"
)
//Credential keeps the username and password for the basic auth
// Credential keeps the username and password for the basic auth
type Credential struct {
Username string
Password string
}
//Controller is used to handle flows of related requests based on the corresponding handlers
//A reverse proxy will be created and managed to proxy the related traffics between API and
//backend chart server
// Controller is used to handle flows of related requests based on the corresponding handlers
// A reverse proxy will be created and managed to proxy the related traffics between API and
// backend chart server
type Controller struct {
//The access endpoint of the backend chart repository server
// The access endpoint of the backend chart repository server
backendServerAddr *url.URL
//To cover the server info and status requests
// To cover the server info and status requests
baseHandler *BaseHandler
//To cover the chart repository requests
// To cover the chart repository requests
repositoryHandler *RepositoryHandler
//To cover all the manipulation requests
// To cover all the manipulation requests
manipulationHandler *ManipulationHandler
//To cover the other utility requests
// To cover the other utility requests
utilityHandler *UtilityHandler
}
//NewController is constructor of the chartserver.Controller
// NewController is constructor of the chartserver.Controller
func NewController(backendServer *url.URL) (*Controller, error) {
if backendServer == nil {
return nil, errors.New("failed to create chartserver.Controller: backend sever address is required")
}
//Try to create credential
// Try to create credential
cred := &Credential{
Username: userName,
Password: os.Getenv(passwordKey),
}
//Use customized reverse proxy
// Use customized reverse proxy
proxy := NewProxyEngine(backendServer, cred)
//Create http client with customized timeouts
// Create http client with customized timeouts
client := NewChartClient(cred)
//Initialize chart operator for use
// Initialize chart operator for use
operator := &ChartOperator{}
//Creat cache
// Creat cache
cacheCfg, err := getCacheConfig()
if err != nil {
//just log the error
//will not break the whole flow if failed to create cache
// just log the error
// will not break the whole flow if failed to create cache
hlog.Errorf("failed to get cache configuration with error: %s", err)
}
cache := NewChartCache(cacheCfg)
@ -97,33 +97,33 @@ func NewController(backendServer *url.URL) (*Controller, error) {
}, nil
}
//GetBaseHandler returns the reference of BaseHandler
// GetBaseHandler returns the reference of BaseHandler
func (c *Controller) GetBaseHandler() *BaseHandler {
return c.baseHandler
}
//GetRepositoryHandler returns the reference of RepositoryHandler
// GetRepositoryHandler returns the reference of RepositoryHandler
func (c *Controller) GetRepositoryHandler() *RepositoryHandler {
return c.repositoryHandler
}
//GetManipulationHandler returns the reference of ManipulationHandler
// GetManipulationHandler returns the reference of ManipulationHandler
func (c *Controller) GetManipulationHandler() *ManipulationHandler {
return c.manipulationHandler
}
//GetUtilityHandler returns the reference of UtilityHandler
// GetUtilityHandler returns the reference of UtilityHandler
func (c *Controller) GetUtilityHandler() *UtilityHandler {
return c.utilityHandler
}
//What's the cache driver if it is set
// What's the cache driver if it is set
func parseCacheDriver() (string, bool) {
driver, ok := os.LookupEnv(cacheDriverENVKey)
return strings.ToLower(driver), ok
}
//Get and parse the configuration for the chart cache
// Get and parse the configuration for the chart cache
func getCacheConfig() (*ChartCacheConfig, error) {
driver, isSet := parseCacheDriver()
if !isSet {
@ -12,14 +12,14 @@ import (
helm_repo "k8s.io/helm/pkg/repo"
)
//Prepare, start the mock servers
// Prepare, start the mock servers
func TestStartServers(t *testing.T) {
if err := startMockServers(); err != nil {
t.Fatal(err)
}
}
//Test /health
// Test /health
func TestGetHealthOfBaseHandler(t *testing.T) {
content, err := httpClient.GetContent(fmt.Sprintf("%s/health", getTheAddrOfFrontServer()))
if err != nil {
@ -36,7 +36,7 @@ func TestGetHealthOfBaseHandler(t *testing.T) {
}
}
//Get /repo1/index.yaml
// Get /repo1/index.yaml
func TestGetIndexYamlByRepo(t *testing.T) {
indexFile, err := getIndexYaml("/repo1/index.yaml")
if err != nil {
@ -48,7 +48,7 @@ func TestGetIndexYamlByRepo(t *testing.T) {
}
}
//Test get /index.yaml
// Test get /index.yaml
func TestGetUnifiedYamlFile(t *testing.T) {
indexFile, err := getIndexYaml("/index.yaml")
if err != nil {
@ -70,8 +70,8 @@ func TestGetUnifiedYamlFile(t *testing.T) {
}
}
//Test download /:repo/charts/chart.tar
//Use this case to test the proxy function
// Test download /:repo/charts/chart.tar
// Use this case to test the proxy function
func TestDownloadChart(t *testing.T) {
content, err := httpClient.GetContent(fmt.Sprintf("%s/repo1/charts/harbor-0.2.0.tgz", getTheAddrOfFrontServer()))
if err != nil {
@ -86,7 +86,7 @@ func TestDownloadChart(t *testing.T) {
}
}
//Test get /api/:repo/charts
// Test get /api/:repo/charts
func TestRetrieveChartList(t *testing.T) {
content, err := httpClient.GetContent(fmt.Sprintf("%s/api/repo1/charts", getTheAddrOfFrontServer()))
if err != nil {
@ -116,7 +116,7 @@ func TestRetrieveChartList(t *testing.T) {
}
}
//Test get /api/:repo/charts/:chart_name/:version
// Test get /api/:repo/charts/:chart_name/:version
func TestGetChartVersion(t *testing.T) {
content, err := httpClient.GetContent(fmt.Sprintf("%s/api/repo1/charts/harbor/0.2.0", getTheAddrOfFrontServer()))
if err != nil {
@ -145,7 +145,7 @@ func TestGetChartVersion(t *testing.T) {
}
}
//Test get /api/:repo/charts/:chart_name/:version with none-existing version
// Test get /api/:repo/charts/:chart_name/:version with none-existing version
func TestGetChartVersionWithError(t *testing.T) {
_, err := httpClient.GetContent(fmt.Sprintf("%s/api/repo1/charts/harbor/1.0.0", getTheAddrOfFrontServer()))
if err == nil {
@ -153,8 +153,8 @@ func TestGetChartVersionWithError(t *testing.T) {
}
}
//Get /api/repo1/charts/harbor
//401 will be rewritten to 500 with specified error
// Get /api/repo1/charts/harbor
// 401 will be rewritten to 500 with specified error
func TestResponseRewrite(t *testing.T) {
response, err := http.Get(fmt.Sprintf("%s/api/repo1/charts/harbor", getTheAddrOfFrontServer()))
if err != nil {
@ -185,12 +185,12 @@ func TestResponseRewrite(t *testing.T) {
}
}
//Clear environments
// Clear environments
func TestStopServers(t *testing.T) {
stopMockServers()
}
//Utility method for getting index yaml file
// Utility method for getting index yaml file
func getIndexYaml(path string) (*helm_repo.IndexFile, error) {
content, err := httpClient.GetContent(fmt.Sprintf("%s%s", getTheAddrOfFrontServer(), path))
if err != nil {
@ -15,34 +15,34 @@ import (
)
const (
//NamespaceContextKey is context key for the namespace
// NamespaceContextKey is context key for the namespace
NamespaceContextKey ContextKey = ":repo"
)
//ContextKey is defined for add value in the context of http request
// ContextKey is defined for add value in the context of http request
type ContextKey string
//ManipulationHandler includes all the handler methods for the purpose of manipulating the
//chart repository
// ManipulationHandler includes all the handler methods for the purpose of manipulating the
// chart repository
type ManipulationHandler struct {
//Proxy used to to transfer the traffic of requests
//It's mainly used to talk to the backend chart server
// Proxy used to to transfer the traffic of requests
// It's mainly used to talk to the backend chart server
trafficProxy *ProxyEngine
//Parse and process the chart version to provide required info data
// Parse and process the chart version to provide required info data
chartOperator *ChartOperator
//HTTP client used to call the realted APIs of the backend chart repositories
// HTTP client used to call the realted APIs of the backend chart repositories
apiClient *ChartClient
//Point to the url of the backend server
// Point to the url of the backend server
backendServerAddress *url.URL
//Cache the chart data
// Cache the chart data
chartCache *ChartCache
}
//ListCharts lists all the charts under the specified namespace
// ListCharts lists all the charts under the specified namespace
func (mh *ManipulationHandler) ListCharts(w http.ResponseWriter, req *http.Request) {
url := strings.TrimPrefix(req.URL.String(), "/")
url = fmt.Sprintf("%s/%s", mh.backendServerAddress.String(), url)
@ -68,14 +68,14 @@ func (mh *ManipulationHandler) ListCharts(w http.ResponseWriter, req *http.Reque
writeJSONData(w, jsonData)
}
//GetChart returns all the chart versions under the specified chart
// GetChart returns all the chart versions under the specified chart
func (mh *ManipulationHandler) GetChart(w http.ResponseWriter, req *http.Request) {
mh.trafficProxy.ServeHTTP(w, req)
}
//GetChartVersion get the specified version for one chart
//This handler should return the details of the chart version,
//maybe including metadata,dependencies and values etc.
// GetChartVersion get the specified version for one chart
// This handler should return the details of the chart version,
// maybe including metadata,dependencies and values etc.
func (mh *ManipulationHandler) GetChartVersion(w http.ResponseWriter, req *http.Request) {
chartV, err := mh.getChartVersion(req.URL.String())
if err != nil {
@ -83,8 +83,8 @@ func (mh *ManipulationHandler) GetChartVersion(w http.ResponseWriter, req *http.
return
}
//Get and check namespace
//even we get the data from cache
// Get and check namespace
// even we get the data from cache
var namespace string
repoValue := req.Context().Value(NamespaceContextKey)
@ -99,17 +99,17 @@ func (mh *ManipulationHandler) GetChartVersion(w http.ResponseWriter, req *http.
return
}
//Query cache
// Query cache
chartDetails := mh.chartCache.GetChart(chartV.Digest)
if chartDetails == nil {
//NOT hit!!
// NOT hit!!
content, err := mh.getChartVersionContent(namespace, chartV.URLs[0])
if err != nil {
WriteInternalError(w, err)
return
}
//Process bytes and get more details of chart version
// Process bytes and get more details of chart version
chartDetails, err = mh.chartOperator.GetChartDetails(content)
if err != nil {
WriteInternalError(w, err)
@ -117,35 +117,35 @@ func (mh *ManipulationHandler) GetChartVersion(w http.ResponseWriter, req *http.
}
chartDetails.Metadata = chartV
//Put it into the cache for next access
// Put it into the cache for next access
mh.chartCache.PutChart(chartDetails)
} else {
//Just logged
// Just logged
hlog.Debugf("Get detailed data from cache for chart: %s:%s (%s)",
chartDetails.Metadata.Name,
chartDetails.Metadata.Version,
chartDetails.Metadata.Digest)
}
//The change of prov file will not cause any influence to the digest of chart,
//and then the digital signature status should be not cached
// The change of prov file will not cause any influence to the digest of chart,
// and then the digital signature status should be not cached
//
//Generate the security report
//prov file share same endpoint with the chart version
//Just add .prov suffix to the chart version to form the path of prov file
//Anyway, there will be a report about the digital signature status
// Generate the security report
// prov file share same endpoint with the chart version
// Just add .prov suffix to the chart version to form the path of prov file
// Anyway, there will be a report about the digital signature status
chartDetails.Security = &SecurityReport{
Signature: &DigitalSignature{
Signed: false,
},
}
//Try to get the prov file to confirm if it is exitsing
// Try to get the prov file to confirm if it is exitsing
provFilePath := fmt.Sprintf("%s.prov", chartV.URLs[0])
provBytes, err := mh.getChartVersionContent(namespace, provFilePath)
if err == nil && len(provBytes) > 0 {
chartDetails.Security.Signature.Signed = true
chartDetails.Security.Signature.Provenance = provFilePath
} else {
//Just log it
// Just log it
hlog.Errorf("Failed to get prov file for chart %s with error: %s, got %d bytes", chartV.Name, err.Error(), len(provBytes))
}
@ -158,22 +158,22 @@ func (mh *ManipulationHandler) GetChartVersion(w http.ResponseWriter, req *http.
writeJSONData(w, bytes)
}
//UploadChartVersion will save the new version of the chart to the backend storage
// UploadChartVersion will save the new version of the chart to the backend storage
func (mh *ManipulationHandler) UploadChartVersion(w http.ResponseWriter, req *http.Request) {
mh.trafficProxy.ServeHTTP(w, req)
}
//UploadProvenanceFile will save the provenance file of the chart to the backend storage
// UploadProvenanceFile will save the provenance file of the chart to the backend storage
func (mh *ManipulationHandler) UploadProvenanceFile(w http.ResponseWriter, req *http.Request) {
mh.trafficProxy.ServeHTTP(w, req)
}
//DeleteChartVersion will delete the specified version of the chart
// DeleteChartVersion will delete the specified version of the chart
func (mh *ManipulationHandler) DeleteChartVersion(w http.ResponseWriter, req *http.Request) {
mh.trafficProxy.ServeHTTP(w, req)
}
//Get the basic metadata of chart version
// Get the basic metadata of chart version
func (mh *ManipulationHandler) getChartVersion(subPath string) (*helm_repo.ChartVersion, error) {
url := fmt.Sprintf("%s/%s", mh.backendServerAddress.String(), strings.TrimPrefix(subPath, "/"))
@ -190,7 +190,7 @@ func (mh *ManipulationHandler) getChartVersion(subPath string) (*helm_repo.Chart
return chartVersion, nil
}
//Get the content bytes of the chart version
// Get the content bytes of the chart version
func (mh *ManipulationHandler) getChartVersionContent(namespace string, subPath string) ([]byte, error) {
url := path.Join(namespace, subPath)
url = fmt.Sprintf("%s/%s", mh.backendServerAddress.String(), url)
@ -22,62 +22,62 @@ const (
maxWorkers = 10
)
//RepositoryHandler defines all the handlers to handle the requests related with chart repository
//e.g: index.yaml and downloading chart objects
// RepositoryHandler defines all the handlers to handle the requests related with chart repository
// e.g: index.yaml and downloading chart objects
type RepositoryHandler struct {
//Proxy used to to transfer the traffic of requests
//It's mainly used to talk to the backend chart server
// Proxy used to to transfer the traffic of requests
// It's mainly used to talk to the backend chart server
trafficProxy *ProxyEngine
//HTTP client used to call the realted APIs of the backend chart repositories
// HTTP client used to call the realted APIs of the backend chart repositories
apiClient *ChartClient
//Point to the url of the backend server
// Point to the url of the backend server
backendServerAddress *url.URL
}
//Pass work to the workers
//'index' is the location of processing namespace/project in the list
// Pass work to the workers
// 'index' is the location of processing namespace/project in the list
type workload struct {
index uint32
}
//Result returned by worker
// Result returned by worker
type processedResult struct {
namespace string
indexFileOfRepo *helm_repo.IndexFile
}
//GetIndexFileWithNS will read the index.yaml data under the specified namespace
// GetIndexFileWithNS will read the index.yaml data under the specified namespace
func (rh *RepositoryHandler) GetIndexFileWithNS(w http.ResponseWriter, req *http.Request) {
rh.trafficProxy.ServeHTTP(w, req)
}
//GetIndexFile will read the index.yaml under all namespaces and merge them as a single one
//Please be aware that, to support this function, the backend chart repository server should
//enable multi-tenancies
// GetIndexFile will read the index.yaml under all namespaces and merge them as a single one
// Please be aware that, to support this function, the backend chart repository server should
// enable multi-tenancies
func (rh *RepositoryHandler) GetIndexFile(w http.ResponseWriter, req *http.Request) {
//Get project manager references
// Get project manager references
projectMgr, err := filter.GetProjectManager(req)
if err != nil {
WriteInternalError(w, err)
return
}
//Get all the projects
// Get all the projects
results, err := projectMgr.List(nil)
if err != nil {
WriteInternalError(w, err)
return
}
//If no projects existing, return empty index.yaml content immediately
// If no projects existing, return empty index.yaml content immediately
if results.Total == 0 {
w.Write(emptyIndexFile())
return
}
//The final merged index file
// The final merged index file
mergedIndexFile := &helm_repo.IndexFile{
APIVersion: "v1",
Entries: make(map[string]helm_repo.ChartVersions),
@ -85,20 +85,20 @@ func (rh *RepositoryHandler) GetIndexFile(w http.ResponseWriter, req *http.Reque
PublicKeys: []string{},
}
//Retrieve index.yaml for repositories
// Retrieve index.yaml for repositories
workerPool := make(chan *workload, maxWorkers)
//Sync the output results from the retriever
// Sync the output results from the retriever
resultChan := make(chan *processedResult, 1)
//Receive error
// Receive error
errorChan := make(chan error, 1)
//Signal chan for merging work
// Signal chan for merging work
mergeDone := make(chan struct{}, 1)
//Total projects/namespaces
// Total projects/namespaces
total := uint32(results.Total)
//Track all the background threads
// Track all the background threads
waitGroup := new(sync.WaitGroup)
//Initialize
// Initialize
initialItemCount := maxWorkers
if total < maxWorkers {
initialItemCount = int(total)
@ -107,11 +107,11 @@ func (rh *RepositoryHandler) GetIndexFile(w http.ResponseWriter, req *http.Reque
workerPool <- &workload{uint32(i)}
}
//Atomtic index
// Atomtic index
var indexRef uint32
atomic.AddUint32(&indexRef, uint32(initialItemCount-1))
//Start the index files merging thread
// Start the index files merging thread
go func() {
defer func() {
mergeDone <- struct{}{}
@ -122,8 +122,8 @@ func (rh *RepositoryHandler) GetIndexFile(w http.ResponseWriter, req *http.Reque
}
}()
//Retrieve the index files for the repositories
//and blocking here
// Retrieve the index files for the repositories
// and blocking here
LOOP:
for {
select {
@ -131,16 +131,16 @@ LOOP:
if work.index >= total {
break LOOP
}
//Process
//New one
// Process
// New one
waitGroup.Add(1)
namespace := results.Projects[work.index].Name
go func(ns string) {
//Return the worker back to the pool
// Return the worker back to the pool
defer func() {
waitGroup.Done() //done
waitGroup.Done() // done
//Put one. The invalid index will be treated as a signal to quit loop
// Put one. The invalid index will be treated as a signal to quit loop
nextOne := atomic.AddUint32(&indexRef, 1)
workerPool <- &workload{nextOne}
}()
@ -151,39 +151,39 @@ LOOP:
return
}
//Output
// Output
resultChan <- &processedResult{
namespace: ns,
indexFileOfRepo: indexFile,
}
}(namespace)
case err = <-errorChan:
//Quit earlier
// Quit earlier
break LOOP
case <-req.Context().Done():
//Quit earlier
// Quit earlier
err = errors.New("request of getting index yaml file is aborted")
break LOOP
}
}
//Hold util all the retrieving work are done
// Hold util all the retrieving work are done
waitGroup.Wait()
//close consumer channel
// close consumer channel
close(resultChan)
//Wait until merging thread quit
// Wait until merging thread quit
<-mergeDone
//All the threads are done
//Met an error
// All the threads are done
// Met an error
if err != nil {
WriteInternalError(w, err)
return
}
//Remove duplicated keys in public key list
// Remove duplicated keys in public key list
hash := make(map[string]string)
for _, key := range mergedIndexFile.PublicKeys {
hash[key] = key
@ -202,15 +202,15 @@ LOOP:
w.Write(bytes)
}
//DownloadChartObject will download the stored chart object to the client
//e.g: helm install
// DownloadChartObject will download the stored chart object to the client
// e.g: helm install
func (rh *RepositoryHandler) DownloadChartObject(w http.ResponseWriter, req *http.Request) {
rh.trafficProxy.ServeHTTP(w, req)
}
//Get the index yaml file under the specified namespace from the backend server
// Get the index yaml file under the specified namespace from the backend server
func (rh *RepositoryHandler) getIndexYamlWithNS(namespace string) (*helm_repo.IndexFile, error) {
//Join url path
// Join url path
url := path.Join(namespace, "index.yaml")
url = fmt.Sprintf("%s/%s", rh.backendServerAddress.String(), url)
hlog.Debugf("Getting index.yaml from '%s'", url)
@ -220,7 +220,7 @@ func (rh *RepositoryHandler) getIndexYamlWithNS(namespace string) (*helm_repo.In
return nil, err
}
//Traverse to index file object for merging
// Traverse to index file object for merging
indexFile := helm_repo.NewIndexFile()
if err := yaml.Unmarshal(content, indexFile); err != nil {
return nil, err
@ -229,41 +229,41 @@ func (rh *RepositoryHandler) getIndexYamlWithNS(namespace string) (*helm_repo.In
return indexFile, nil
}
//Merge the content of mergingIndexFile to the baseIndex
//The chart url should be without --chart-url prefix
// Merge the content of mergingIndexFile to the baseIndex
// The chart url should be without --chart-url prefix
func (rh *RepositoryHandler) mergeIndexFile(namespace string,
baseIndex *helm_repo.IndexFile,
mergingIndexFile *helm_repo.IndexFile) {
//Append entries
// Append entries
for chartName, chartVersions := range mergingIndexFile.Entries {
nameWithNS := fmt.Sprintf("%s/%s", namespace, chartName)
for _, version := range chartVersions {
version.Name = nameWithNS
//Currently there is only one url
// Currently there is only one url
for index, url := range version.URLs {
version.URLs[index] = path.Join(namespace, url)
}
}
//Appended
// Appended
baseIndex.Entries[nameWithNS] = chartVersions
}
//Update generated time
// Update generated time
if mergingIndexFile.Generated.After(baseIndex.Generated) {
baseIndex.Generated = mergingIndexFile.Generated
}
//Merge public keys
// Merge public keys
baseIndex.PublicKeys = append(baseIndex.PublicKeys, mergingIndexFile.PublicKeys...)
}
//Generate empty index file
// Generate empty index file
func emptyIndexFile() []byte {
emptyIndexFile := &helm_repo.IndexFile{}
emptyIndexFile.Generated = time.Now()
//Ignore the error
// Ignore the error
rawData, _ := json.Marshal(emptyIndexFile)
return rawData
@ -19,17 +19,17 @@ const (
contentLengthHeader = "Content-Length"
)
//ProxyEngine is used to proxy the related traffics
// ProxyEngine is used to proxy the related traffics
type ProxyEngine struct {
//The backend target server the traffic will be forwarded to
//Just in case we'll use it
// The backend target server the traffic will be forwarded to
// Just in case we'll use it
backend *url.URL
//Use go reverse proxy as engine
// Use go reverse proxy as engine
engine *httputil.ReverseProxy
}
//NewProxyEngine is constructor of NewProxyEngine
// NewProxyEngine is constructor of NewProxyEngine
func NewProxyEngine(target *url.URL, cred *Credential) *ProxyEngine {
return &ProxyEngine{
backend: target,
@ -43,17 +43,17 @@ func NewProxyEngine(target *url.URL, cred *Credential) *ProxyEngine {
}
}
//ServeHTTP serves the incoming http requests
// ServeHTTP serves the incoming http requests
func (pe *ProxyEngine) ServeHTTP(w http.ResponseWriter, req *http.Request) {
pe.engine.ServeHTTP(w, req)
}
//Overwrite the http requests
// Overwrite the http requests
func director(target *url.URL, cred *Credential, req *http.Request) {
//Closure
// Closure
targetQuery := target.RawQuery
//Overwrite the request URL to the target path
// Overwrite the request URL to the target path
req.URL.Scheme = target.Scheme
req.URL.Host = target.Host
req.URL.Path = singleJoiningSlash(target.Path, req.URL.Path)
@ -66,28 +66,28 @@ func director(target *url.URL, cred *Credential, req *http.Request) {
req.Header.Set("User-Agent", agentHarbor)
}
//Add authentication header if it is existing
// Add authentication header if it is existing
if cred != nil {
req.SetBasicAuth(cred.Username, cred.Password)
}
}
//Modify the http response
// Modify the http response
func modifyResponse(res *http.Response) error {
//Accept cases
//Success or redirect
// Accept cases
// Success or redirect
if res.StatusCode >= http.StatusOK && res.StatusCode <= http.StatusTemporaryRedirect {
return nil
}
//Detect the 401 code, if it is,overwrite it to 500.
//We also re-write the error content to structural error object
// Detect the 401 code, if it is,overwrite it to 500.
// We also re-write the error content to structural error object
errorObj := make(map[string]string)
if res.StatusCode == http.StatusUnauthorized {
errorObj["error"] = "operation request from unauthorized source is rejected"
res.StatusCode = http.StatusInternalServerError
} else {
//Extract the error and wrap it into the error object
// Extract the error and wrap it into the error object
data, err := ioutil.ReadAll(res.Body)
if err != nil {
errorObj["error"] = fmt.Sprintf("%s: %s", res.Status, err.Error())
@ -112,8 +112,8 @@ func modifyResponse(res *http.Response) error {
return nil
}
//Join the path
//Copy from the go reverse proxy
// Join the path
// Copy from the go reverse proxy
func singleJoiningSlash(a, b string) string {
aslash := strings.HasSuffix(a, "/")
bslash := strings.HasPrefix(b, "/")
@ -14,19 +14,19 @@ const (
maxDeletionThreads = 10
)
//UtilityHandler provides utility methods
// UtilityHandler provides utility methods
type UtilityHandler struct {
//Parse and process the chart version to provide required info data
// Parse and process the chart version to provide required info data
chartOperator *ChartOperator
//HTTP client used to call the realted APIs of the backend chart repositories
// HTTP client used to call the realted APIs of the backend chart repositories
apiClient *ChartClient
//Point to the url of the backend server
// Point to the url of the backend server
backendServerAddress *url.URL
}
//GetChartsByNs gets the chart list under the namespace
// GetChartsByNs gets the chart list under the namespace
func (uh *UtilityHandler) GetChartsByNs(namespace string) ([]*ChartInfo, error) {
if len(strings.TrimSpace(namespace)) == 0 {
return nil, errors.New("empty namespace when getting chart list")
@ -43,7 +43,7 @@ func (uh *UtilityHandler) GetChartsByNs(namespace string) ([]*ChartInfo, error)
return uh.chartOperator.GetChartList(content)
}
//DeleteChart deletes all the chart versions of the specified chart under the namespace.
// DeleteChart deletes all the chart versions of the specified chart under the namespace.
func (uh *UtilityHandler) DeleteChart(namespace, chartName string) error {
if len(strings.TrimSpace(namespace)) == 0 {
return errors.New("empty namespace when deleting chart")
@ -66,8 +66,8 @@ func (uh *UtilityHandler) DeleteChart(namespace, chartName string) error {
return err
}
//Let's delete the versions in parallel
//The number of goroutine is controlled by the const maxDeletionThreads
// Let's delete the versions in parallel
// The number of goroutine is controlled by the const maxDeletionThreads
qSize := len(allVersions)
if qSize > maxDeletionThreads {
qSize = maxDeletionThreads
@ -77,17 +77,17 @@ func (uh *UtilityHandler) DeleteChart(namespace, chartName string) error {
waitGroup := new(sync.WaitGroup)
waitGroup.Add(len(allVersions))
//Append initial tokens
// Append initial tokens
for i := 0; i < qSize; i++ {
tokenQueue <- struct{}{}
}
//Collect errors
// Collect errors
errs := make([]error, 0)
errWrapper := make(chan error, 1)
go func() {
defer func() {
//pass to the out func
// pass to the out func
if len(errs) > 0 {
errWrapper <- fmt.Errorf("%v", errs)
}
@ -99,19 +99,19 @@ func (uh *UtilityHandler) DeleteChart(namespace, chartName string) error {
}
}()
//Schedule deletion tasks
// Schedule deletion tasks
for _, deletingVersion := range allVersions {
//Apply for token first
//If no available token, pending here
// Apply for token first
// If no available token, pending here
<-tokenQueue
//Got one token
// Got one token
go func(deletingVersion *helm_repo.ChartVersion) {
defer func() {
//return the token back
// return the token back
tokenQueue <- struct{}{}
//done
// done
waitGroup.Done()
}()
@ -121,9 +121,9 @@ func (uh *UtilityHandler) DeleteChart(namespace, chartName string) error {
}(deletingVersion)
}
//Wait all goroutines are done
// Wait all goroutines are done
waitGroup.Wait()
//Safe to quit error collection goroutine
// Safe to quit error collection goroutine
close(errChan)
err = <-errWrapper
@ -131,7 +131,7 @@ func (uh *UtilityHandler) DeleteChart(namespace, chartName string) error {
return err
}
//deleteChartVersion deletes the specified chart version
// deleteChartVersion deletes the specified chart version
func (uh *UtilityHandler) deleteChartVersion(namespace, chartName, version string) error {
path := fmt.Sprintf("/api/%s/charts/%s/%s", namespace, chartName, version)
url := fmt.Sprintf("%s%s", uh.backendServerAddress.String(), path)
@ -7,7 +7,7 @@ import (
"testing"
)
//TestGetChartsByNs tests GetChartsByNs method in UtilityHandler
// TestGetChartsByNs tests GetChartsByNs method in UtilityHandler
func TestGetChartsByNs(t *testing.T) {
mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.RequestURI {
@ -43,7 +43,7 @@ func TestGetChartsByNs(t *testing.T) {
}
}
//Test the function DeleteChart
// Test the function DeleteChart
func TestDeleteChart(t *testing.T) {
mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.RequestURI {
@ -14,7 +14,7 @@ const (
|
||||
contentTypeJSON = "application/json"
|
||||
)
|
||||
|
||||
//WriteError writes error to http client
|
||||
// WriteError writes error to http client
|
||||
func WriteError(w http.ResponseWriter, code int, err error) {
|
||||
errorObj := make(map[string]string)
|
||||
errorObj["error"] = err.Error()
|
||||
@ -26,20 +26,20 @@ func WriteError(w http.ResponseWriter, code int, err error) {
|
||||
w.Write(errorContent)
|
||||
}
|
||||
|
||||
//WriteInternalError writes error with statusCode == 500
|
||||
// WriteInternalError writes error with statusCode == 500
|
||||
func WriteInternalError(w http.ResponseWriter, err error) {
|
||||
WriteError(w, http.StatusInternalServerError, err)
|
||||
}
|
||||
|
||||
//Write JSON data to http client
|
||||
// Write JSON data to http client
|
||||
func writeJSONData(w http.ResponseWriter, data []byte) {
|
||||
w.Header().Set(contentTypeHeader, contentTypeJSON)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write(data)
|
||||
}
|
||||
|
||||
//Extract error object '{"error": "****---***"}' from the content if existing
|
||||
//nil error will be returned if it does exist
|
||||
// Extract error object '{"error": "****---***"}' from the content if existing
|
||||
// nil error will be returned if it does exist
|
||||
func extractError(content []byte) error {
|
||||
if len(content) == 0 {
|
||||
return nil
|
||||
@ -58,8 +58,8 @@ func extractError(content []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
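The extractError comment above describes pulling an {"error": "..."} object out of a backend response body and turning it into a Go error. Since part of that function body falls outside this hunk, the following is only a plausible reconstruction of the rule, not the verbatim implementation; the helper name is changed to make that clear.

package chartserver

import (
	"encoding/json"
	"errors"
)

// extractErrorSketch is a sketch only: return a non-nil error when the
// response body carries an "error" field, otherwise return nil.
func extractErrorSketch(content []byte) error {
	if len(content) == 0 {
		return nil
	}
	payload := make(map[string]string)
	if err := json.Unmarshal(content, &payload); err != nil {
		return nil // not the expected shape; nothing to extract
	}
	if msg, ok := payload["error"]; ok && len(msg) > 0 {
		return errors.New(msg)
	}
	return nil
}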
//Parse the redis configuration to the beego cache pattern
//Config pattern is "address:port[,weight,password,db_index]"
// Parse the redis configuration to the beego cache pattern
// Config pattern is "address:port[,weight,password,db_index]"
func parseRedisConfig(redisConfigV string) (string, error) {
if len(redisConfigV) == 0 {
return "", errors.New("empty redis config")
@ -68,47 +68,47 @@ func parseRedisConfig(redisConfigV string) (string, error) {
redisConfig := make(map[string]string)
redisConfig["key"] = cacheCollectionName

//Try best to parse the configuration segments.
//If the related parts are missing, assign default value.
//The default database index for UI process is 0.
// Try best to parse the configuration segments.
// If the related parts are missing, assign default value.
// The default database index for UI process is 0.
configSegments := strings.Split(redisConfigV, ",")
for i, segment := range configSegments {
if i > 3 {
//ignore useless segments
// ignore useless segments
break
}

switch i {
//address:port
// address:port
case 0:
redisConfig["conn"] = segment
//password, may not exist
// password, may not exist
case 2:
redisConfig["password"] = segment
//database index, may not exist
// database index, may not exist
case 3:
redisConfig["dbNum"] = segment
}
}

//Assign default value
// Assign default value
if len(redisConfig["dbNum"]) == 0 {
redisConfig["dbNum"] = "0"
}

//Try to validate the connection address
// Try to validate the connection address
fullAddr := redisConfig["conn"]
if strings.Index(fullAddr, "://") == -1 {
//Append schema
// Append schema
fullAddr = fmt.Sprintf("redis://%s", fullAddr)
}
//Validate it by url
// Validate it by url
_, err := url.Parse(fullAddr)
if err != nil {
return "", err
}

//Convert config map to string
// Convert config map to string
cfgData, err := json.Marshal(redisConfig)
if err != nil {
return "", err
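parseRedisConfig above turns "address:port[,weight,password,db_index]" into the JSON string beego's redis cache adapter expects; note the weight segment (index 1) is simply ignored. A standalone sketch of that rule follows; the "charts" key is an assumed placeholder for cacheCollectionName, which is defined elsewhere in the package, and the URL validation step is omitted for brevity.

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// toRedisCacheConfig sketches the parsing rule described above.
func toRedisCacheConfig(raw string) (string, error) {
	if len(raw) == 0 {
		return "", fmt.Errorf("empty redis config")
	}
	cfg := map[string]string{"key": "charts", "dbNum": "0"}
	for i, segment := range strings.Split(raw, ",") {
		switch i {
		case 0:
			cfg["conn"] = segment // address:port
		case 2:
			cfg["password"] = segment // optional
		case 3:
			cfg["dbNum"] = segment // optional
		}
	}
	data, err := json.Marshal(cfg)
	return string(data), err
}

func main() {
	s, _ := toRedisCacheConfig("redis:6379,100,Passw0rd,1")
	fmt.Println(s) // {"conn":"redis:6379","dbNum":"1","key":"charts","password":"Passw0rd"}
}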
@ -5,21 +5,21 @@ import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
//Test the utility function parseRedisConfig
|
||||
// Test the utility function parseRedisConfig
|
||||
func TestParseRedisConfig(t *testing.T) {
|
||||
//Case 1: empty addr
|
||||
// Case 1: empty addr
|
||||
redisAddr := ""
|
||||
if _, err := parseRedisConfig(redisAddr); err == nil {
|
||||
t.Fatal("expect non nil error but got nil one if addr is empty")
|
||||
}
|
||||
|
||||
//Case 2: short pattern, addr:port
|
||||
// Case 2: short pattern, addr:port
|
||||
redisAddr = "redis:6379"
|
||||
if parsedConnStr, err := parseRedisConfig(redisAddr); err != nil {
|
||||
t.Fatalf("expect nil error but got non nil one if addr is short pattern: %s\n", parsedConnStr)
|
||||
}
|
||||
|
||||
//Case 3: long pattern but miss some parts
|
||||
// Case 3: long pattern but miss some parts
|
||||
redisAddr = "redis:6379,100"
|
||||
if parsedConnStr, err := parseRedisConfig(redisAddr); err != nil {
|
||||
t.Fatalf("expect nil error but got non nil one if addr is long pattern with some parts missing: %s\n", parsedConnStr)
|
||||
@ -29,7 +29,7 @@ func TestParseRedisConfig(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
//Case 4: long pattern
|
||||
// Case 4: long pattern
|
||||
redisAddr = "redis:6379,100,Passw0rd,1"
|
||||
if parsedConnStr, err := parseRedisConfig(redisAddr); err != nil {
|
||||
t.Fatal("expect nil error but got non nil one if addr is long pattern")
|
||||
|
@ -154,7 +154,7 @@ var (
|
||||
ReadOnly,
|
||||
}
|
||||
|
||||
//value is default value
|
||||
// value is default value
|
||||
HarborStringKeysMap = map[string]string{
|
||||
AUTHMode: "db_auth",
|
||||
LDAPURL: "",
|
||||
|
@ -38,7 +38,7 @@ func GetTotalOfAccessLogs(query *models.LogQueryParam) (int64, error) {
|
||||
return logQueryConditions(query).Count()
|
||||
}
|
||||
|
||||
//GetAccessLogs gets access logs according to different conditions
|
||||
// GetAccessLogs gets access logs according to different conditions
|
||||
func GetAccessLogs(query *models.LogQueryParam) ([]models.AccessLog, error) {
|
||||
qs := logQueryConditions(query).OrderBy("-op_time")
|
||||
|
||||
|
@ -131,7 +131,7 @@ func ClearTable(table string) error {
|
||||
if table == models.UserTable {
|
||||
sql = fmt.Sprintf("delete from %s where user_id > 2", table)
|
||||
}
|
||||
if table == "project_metadata" { //make sure library is public
|
||||
if table == "project_metadata" { // make sure library is public
|
||||
sql = fmt.Sprintf("delete from %s where id > 1", table)
|
||||
}
|
||||
_, err := o.Raw(sql).Exec()
|
||||
@ -152,7 +152,7 @@ func paginateForQuerySetter(qs orm.QuerySeter, page, size int64) orm.QuerySeter
|
||||
return qs
|
||||
}
|
||||
|
||||
//Escape ..
|
||||
// Escape ..
|
||||
func Escape(str string) string {
|
||||
str = strings.Replace(str, `%`, `\%`, -1)
|
||||
str = strings.Replace(str, `_`, `\_`, -1)
|
||||
|
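The body of paginateForQuerySetter sits almost entirely outside the hunk above, so the following is only a plausible sketch of the page/size handling it implies, written against beego orm's Limit and Offset; it is an assumption for illustration, not the verbatim implementation.

package dao

import "github.com/astaxie/beego/orm"

// paginate applies paging only when both values are positive (sketch).
func paginate(qs orm.QuerySeter, page, size int64) orm.QuerySeter {
	if size > 0 {
		qs = qs.Limit(size)
		if page > 0 {
			qs = qs.Offset((page - 1) * size)
		}
	}
	return qs
}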
@ -21,7 +21,7 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
//SetClairVulnTimestamp update the last_update of a namespace. If there's no record for this namespace, one will be created.
|
||||
// SetClairVulnTimestamp update the last_update of a namespace. If there's no record for this namespace, one will be created.
|
||||
func SetClairVulnTimestamp(namespace string, timestamp time.Time) error {
|
||||
o := GetOrmer()
|
||||
rec := &models.ClairVulnTimestamp{
|
||||
@ -43,7 +43,7 @@ func SetClairVulnTimestamp(namespace string, timestamp time.Time) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
//ListClairVulnTimestamps return a list of all records in vuln timestamp table.
|
||||
// ListClairVulnTimestamps return a list of all records in vuln timestamp table.
|
||||
func ListClairVulnTimestamps() ([]*models.ClairVulnTimestamp, error) {
|
||||
var res []*models.ClairVulnTimestamp
|
||||
o := GetOrmer()
|
||||
|
@ -32,7 +32,7 @@ var (
|
||||
once sync.Once
|
||||
)
|
||||
|
||||
//GetOrmer return the singleton of Ormer for clair DB.
|
||||
// GetOrmer return the singleton of Ormer for clair DB.
|
||||
func GetOrmer() orm.Ormer {
|
||||
once.Do(func() {
|
||||
dbInstance, err := orm.GetDB(dao.ClairDBAlias)
|
||||
@ -47,7 +47,7 @@ func GetOrmer() orm.Ormer {
|
||||
return ormer
|
||||
}
|
||||
|
||||
//GetLastUpdate query the table `keyvalue` in clair's DB return the value of `updater/last`
|
||||
// GetLastUpdate query the table `keyvalue` in clair's DB return the value of `updater/last`
|
||||
func GetLastUpdate() (int64, error) {
|
||||
var list orm.ParamsList
|
||||
num, err := GetOrmer().Raw("SELECT value from keyvalue where key=?", updaterLast).ValuesFlat(&list)
|
||||
@ -60,7 +60,7 @@ func GetLastUpdate() (int64, error) {
|
||||
return 0, fmt.Errorf("The value: %v, is non-string", list[0])
|
||||
}
|
||||
res, err := strconv.ParseInt(s, 0, 64)
|
||||
if err != nil { //shouldn't be here.
|
||||
if err != nil { // shouldn't be here.
|
||||
return 0, err
|
||||
}
|
||||
return res, nil
|
||||
@ -68,6 +68,6 @@ func GetLastUpdate() (int64, error) {
|
||||
if num > 1 {
|
||||
return 0, fmt.Errorf("Multiple entries for %s in Clair DB", updaterLast)
|
||||
}
|
||||
//num is zero, it's not updated yet.
|
||||
// num is zero, it's not updated yet.
|
||||
return 0, nil
|
||||
}
|
||||
|
@ -187,7 +187,7 @@ func TestRegister(t *testing.T) {
|
||||
t.Errorf("Error occurred in Register: %v", err)
|
||||
}
|
||||
|
||||
//Check if user registered successfully.
|
||||
// Check if user registered successfully.
|
||||
queryUser := models.User{
|
||||
Username: username,
|
||||
}
|
||||
@ -567,7 +567,7 @@ func TestGetUserProjectRoles(t *testing.T) {
|
||||
t.Errorf("Error happened in GetUserProjectRole: %v, userID: %+v, project Id: %d", err, currentUser.UserID, currentProject.ProjectID)
|
||||
}
|
||||
|
||||
//Get the size of current user project role.
|
||||
// Get the size of current user project role.
|
||||
if len(r) != 1 {
|
||||
t.Errorf("The user, id: %d, should only have one role in project, id: %d, but actual: %d", currentUser.UserID, currentProject.ProjectID, len(r))
|
||||
}
|
||||
@ -675,7 +675,7 @@ func TestAddRepTarget(t *testing.T) {
|
||||
Username: "admin",
|
||||
Password: "admin",
|
||||
}
|
||||
//_, err := AddRepTarget(target)
|
||||
// _, err := AddRepTarget(target)
|
||||
id, err := AddRepTarget(target)
|
||||
t.Logf("added target, id: %d", id)
|
||||
if err != nil {
|
||||
|
@ -91,7 +91,7 @@ func DeleteUserGroup(id int) error {
|
||||
o := dao.GetOrmer()
|
||||
_, err := o.Delete(&userGroup)
|
||||
if err == nil {
|
||||
//Delete all related project members
|
||||
// Delete all related project members
|
||||
sql := `delete from project_member where entity_id = ? and entity_type='g'`
|
||||
_, err := o.Raw(sql, id).Exec()
|
||||
if err != nil {
|
||||
@ -147,7 +147,7 @@ func GetGroupDNQueryCondition(userGroupList []*models.UserGroup) string {
|
||||
count++
|
||||
}
|
||||
}
|
||||
//No LDAP Group found
|
||||
// No LDAP Group found
|
||||
if count == 0 {
|
||||
return ""
|
||||
}
|
||||
|
@ -30,7 +30,7 @@ var createdUserGroupID int
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
|
||||
//databases := []string{"mysql", "sqlite"}
|
||||
// databases := []string{"mysql", "sqlite"}
|
||||
databases := []string{"postgresql"}
|
||||
for _, database := range databases {
|
||||
log.Infof("run test cases for database: %s", database)
|
||||
@ -43,7 +43,7 @@ func TestMain(m *testing.M) {
|
||||
log.Fatalf("invalid database: %s", database)
|
||||
}
|
||||
|
||||
//Extract to test utils
|
||||
// Extract to test utils
|
||||
initSqls := []string{
|
||||
"insert into harbor_user (username, email, password, realname) values ('member_test_01', 'member_test_01@example.com', '123456', 'member_test_01')",
|
||||
"insert into project (name, owner_id) values ('member_test_01', 1)",
|
||||
|
@ -19,7 +19,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/astaxie/beego/orm"
|
||||
_ "github.com/go-sql-driver/mysql" //register mysql driver
|
||||
_ "github.com/go-sql-driver/mysql" // register mysql driver
|
||||
"github.com/goharbor/harbor/src/common/utils"
|
||||
)
|
||||
|
||||
|
@ -20,12 +20,12 @@ import (
|
||||
|
||||
"github.com/astaxie/beego/orm"
|
||||
"github.com/golang-migrate/migrate"
|
||||
_ "github.com/golang-migrate/migrate/database/postgres" //import pgsql driver for migrator
|
||||
_ "github.com/golang-migrate/migrate/database/postgres" // import pgsql driver for migrator
|
||||
_ "github.com/golang-migrate/migrate/source/file" // import local file driver for migrator
|
||||
|
||||
"github.com/goharbor/harbor/src/common/utils"
|
||||
"github.com/goharbor/harbor/src/common/utils/log"
|
||||
_ "github.com/lib/pq" //register pgsql driver
|
||||
_ "github.com/lib/pq" // register pgsql driver
|
||||
)
|
||||
|
||||
const defaultMigrationPath = "migrations/postgresql/"
|
||||
@ -71,7 +71,7 @@ func NewPGSQL(host string, port string, usr string, pwd string, database string,
|
||||
}
|
||||
}
|
||||
|
||||
//Register registers pgSQL to orm with the info wrapped by the instance.
|
||||
// Register registers pgSQL to orm with the info wrapped by the instance.
|
||||
func (p *pgsql) Register(alias ...string) error {
|
||||
if err := utils.TestTCPConn(fmt.Sprintf("%s:%s", p.host, p.port), 60, 2); err != nil {
|
||||
return err
|
||||
@ -91,10 +91,10 @@ func (p *pgsql) Register(alias ...string) error {
|
||||
return orm.RegisterDataBase(an, "postgres", info)
|
||||
}
|
||||
|
||||
//UpgradeSchema calls migrate tool to upgrade schema to the latest based on the SQL scripts.
|
||||
// UpgradeSchema calls migrate tool to upgrade schema to the latest based on the SQL scripts.
|
||||
func (p *pgsql) UpgradeSchema() error {
|
||||
dbURL := fmt.Sprintf("postgres://%s:%s@%s:%s/%s?sslmode=%s", p.usr, p.pwd, p.host, p.port, p.database, pgsqlSSLMode(p.sslmode))
|
||||
//For UT
|
||||
// For UT
|
||||
path := os.Getenv("POSTGRES_MIGRATION_SCRIPTS_PATH")
|
||||
if len(path) == 0 {
|
||||
path = defaultMigrationPath
|
||||
@ -114,7 +114,7 @@ func (p *pgsql) UpgradeSchema() error {
|
||||
err = m.Up()
|
||||
if err == migrate.ErrNoChange {
|
||||
log.Infof("No change in schema, skip.")
|
||||
} else if err != nil { //migrate.ErrLockTimeout will be thrown when another process is doing migration and timeout.
|
||||
} else if err != nil { // migrate.ErrLockTimeout will be thrown when another process is doing migration and timeout.
|
||||
log.Errorf("Failed to upgrade schema, error: %q", err)
|
||||
return err
|
||||
}
|
||||
|
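UpgradeSchema above drives golang-migrate against the PostgreSQL database. A minimal standalone sketch of that flow is shown below with illustrative connection values; in Harbor the database URL is built from the pgsql struct fields and the script path can be overridden via POSTGRES_MIGRATION_SCRIPTS_PATH.

package main

import (
	"log"

	"github.com/golang-migrate/migrate"
	_ "github.com/golang-migrate/migrate/database/postgres" // pgsql driver for migrator
	_ "github.com/golang-migrate/migrate/source/file"       // local file driver for migrator
)

func main() {
	// Illustrative values only.
	dbURL := "postgres://user:pass@127.0.0.1:5432/registry?sslmode=disable"
	m, err := migrate.New("file://migrations/postgresql/", dbURL)
	if err != nil {
		log.Fatal(err)
	}
	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		// ErrLockTimeout can surface when another instance is migrating concurrently.
		log.Fatal(err)
	}
	log.Println("schema is up to date")
}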
@ -292,8 +292,8 @@ func GetRolesByLDAPGroup(projectID int64, groupDNCondition string) ([]int, error
|
||||
return roles, nil
|
||||
}
|
||||
o := GetOrmer()
|
||||
//Because an LDAP user can be memberof multiple groups,
|
||||
//the role is in descent order (1-admin, 2-developer, 3-guest), use min to select the max privilege role.
|
||||
// Because an LDAP user can be memberof multiple groups,
|
||||
// the role is in descent order (1-admin, 2-developer, 3-guest), use min to select the max privilege role.
|
||||
sql := fmt.Sprintf(
|
||||
`select min(pm.role) from project_member pm
|
||||
left join user_group ug on pm.entity_type = 'g' and pm.entity_id = ug.id
|
||||
@ -304,7 +304,7 @@ func GetRolesByLDAPGroup(projectID int64, groupDNCondition string) ([]int, error
|
||||
log.Warningf("Error in GetRolesByLDAPGroup, error: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
//If there is no row selected, the min returns an empty row, to avoid return 0 as role
|
||||
// If there is no row selected, the min returns an empty row, to avoid return 0 as role
|
||||
if len(roles) == 1 && roles[0] == 0 {
|
||||
return []int{}, nil
|
||||
}
|
||||
|
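The query above relies on role IDs being ordered by privilege (1-admin, 2-developer, 3-guest), so min() selects the most privileged membership when a user is in several groups. The same rule expressed in plain Go, purely as an illustration:

package dao

// mostPrivilegedRole returns the lowest (most privileged) role ID,
// or 0 when no group membership matched at all.
func mostPrivilegedRole(roles []int) int {
	if len(roles) == 0 {
		return 0
	}
	min := roles[0]
	for _, r := range roles[1:] {
		if r < min {
			min = r
		}
	}
	return min
}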
@ -30,7 +30,7 @@ import (
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
|
||||
//databases := []string{"mysql", "sqlite"}
|
||||
// databases := []string{"mysql", "sqlite"}
|
||||
databases := []string{"postgresql"}
|
||||
for _, database := range databases {
|
||||
log.Infof("run test cases for database: %s", database)
|
||||
@ -43,7 +43,7 @@ func TestMain(m *testing.M) {
|
||||
log.Fatalf("invalid database: %s", database)
|
||||
}
|
||||
|
||||
//Extract to test utils
|
||||
// Extract to test utils
|
||||
initSqls := []string{
|
||||
"insert into harbor_user (username, email, password, realname) values ('member_test_01', 'member_test_01@example.com', '123456', 'member_test_01')",
|
||||
"insert into project (name, owner_id) values ('member_test_01', 1)",
|
||||
|
@ -91,13 +91,13 @@ func IncreasePullCount(name string) (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
//RepositoryExists returns whether the repository exists according to its name.
|
||||
// RepositoryExists returns whether the repository exists according to its name.
|
||||
func RepositoryExists(name string) bool {
|
||||
o := GetOrmer()
|
||||
return o.QueryTable("repository").Filter("name", name).Exist()
|
||||
}
|
||||
|
||||
//GetTopRepos returns the most popular repositories whose project ID is
|
||||
// GetTopRepos returns the most popular repositories whose project ID is
|
||||
// in projectIDs
|
||||
func GetTopRepos(projectIDs []int64, n int) ([]*models.RepoRecord, error) {
|
||||
repositories := []*models.RepoRecord{}
|
||||
|
@ -18,7 +18,7 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/astaxie/beego/orm"
|
||||
_ "github.com/mattn/go-sqlite3" //register sqlite driver
|
||||
_ "github.com/mattn/go-sqlite3" // register sqlite driver
|
||||
)
|
||||
|
||||
type sqlite struct {
|
||||
|
@ -94,7 +94,7 @@ func LoginByDb(auth models.AuthModel) (*models.User, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
user.Password = "" //do not return the password
|
||||
user.Password = "" // do not return the password
|
||||
|
||||
return &user, nil
|
||||
}
|
||||
@ -244,7 +244,7 @@ func OnBoardUser(u *models.User) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
//IsSuperUser checks if the user is super user(conventionally id == 1) of Harbor
|
||||
// IsSuperUser checks if the user is super user(conventionally id == 1) of Harbor
|
||||
func IsSuperUser(username string) bool {
|
||||
u, err := GetUser(models.User{
|
||||
Username: username,
|
||||
@ -257,7 +257,7 @@ func IsSuperUser(username string) bool {
|
||||
return u != nil && u.UserID == 1
|
||||
}
|
||||
|
||||
//CleanUser - Clean this user information from DB
|
||||
// CleanUser - Clean this user information from DB
|
||||
func CleanUser(id int64) error {
|
||||
if _, err := GetOrmer().QueryTable(&models.User{}).
|
||||
Filter("UserID", id).Delete(); err != nil {
|
||||
|
@ -17,7 +17,7 @@ type Client interface {
|
||||
SubmitJob(*models.JobData) (string, error)
|
||||
GetJobLog(uuid string) ([]byte, error)
|
||||
PostAction(uuid, action string) error
|
||||
//TODO Redirect joblog when we see there's memory issue.
|
||||
// TODO Redirect joblog when we see there's memory issue.
|
||||
}
|
||||
|
||||
// DefaultClient is the default implementation of Client interface
|
||||
@ -41,7 +41,7 @@ func NewDefaultClient(endpoint, secret string) *DefaultClient {
|
||||
}
|
||||
}
|
||||
|
||||
//SubmitJob call jobserivce API to submit a job and returns the job's UUID.
|
||||
// SubmitJob call jobserivce API to submit a job and returns the job's UUID.
|
||||
func (d *DefaultClient) SubmitJob(jd *models.JobData) (string, error) {
|
||||
url := d.endpoint + "/api/v1/jobs"
|
||||
jq := models.JobRequest{
|
||||
@ -78,7 +78,7 @@ func (d *DefaultClient) SubmitJob(jd *models.JobData) (string, error) {
|
||||
return stats.Stats.JobID, nil
|
||||
}
|
||||
|
||||
//GetJobLog call jobserivce API to get the log of a job. It only accepts the UUID of the job
|
||||
// GetJobLog call jobserivce API to get the log of a job. It only accepts the UUID of the job
|
||||
func (d *DefaultClient) GetJobLog(uuid string) ([]byte, error) {
|
||||
url := d.endpoint + "/api/v1/jobs/" + uuid + "/log"
|
||||
req, err := http.NewRequest(http.MethodGet, url, nil)
|
||||
|
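SubmitJob above POSTs a JobRequest to the job service and reads the job ID back from the JobStats payload. A trimmed, self-contained sketch of that exchange follows; the endpoint value, the accepted status codes and the omission of the security header are assumptions for illustration, and the request/response shapes are cut down to the fields shown in the models in this commit.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

type jobData struct {
	Name       string                 `json:"name"`
	Parameters map[string]interface{} `json:"parameters"`
}
type jobRequest struct {
	Job *jobData `json:"job"`
}
type jobStats struct {
	Stats struct {
		JobID string `json:"id"`
	} `json:"job"`
}

// submitJob posts the request and returns the job ID reported back.
func submitJob(endpoint string, jd *jobData) (string, error) {
	body, err := json.Marshal(&jobRequest{Job: jd})
	if err != nil {
		return "", err
	}
	resp, err := http.Post(endpoint+"/api/v1/jobs", "application/json", bytes.NewReader(body))
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusAccepted && resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("unexpected status %d", resp.StatusCode)
	}
	stats := &jobStats{}
	if err := json.NewDecoder(resp.Body).Decode(stats); err != nil {
		return "", err
	}
	return stats.Stats.JobID, nil
}

func main() {
	id, err := submitJob("http://jobservice:8080", &jobData{Name: "IMAGE_SCAN"})
	fmt.Println(id, err)
}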
@ -1,7 +1,7 @@
|
||||
package job
|
||||
|
||||
const (
|
||||
//ImageScanJob is name of scan job it will be used as key to register to job service.
|
||||
// ImageScanJob is name of scan job it will be used as key to register to job service.
|
||||
ImageScanJob = "IMAGE_SCAN"
|
||||
// ImageTransfer : the name of image transfer job in job service
|
||||
ImageTransfer = "IMAGE_TRANSFER"
|
||||
@ -12,26 +12,26 @@ const (
|
||||
// ImageGC the name of image garbage collection job in job service
|
||||
ImageGC = "IMAGE_GC"
|
||||
|
||||
//JobKindGeneric : Kind of generic job
|
||||
// JobKindGeneric : Kind of generic job
|
||||
JobKindGeneric = "Generic"
|
||||
//JobKindScheduled : Kind of scheduled job
|
||||
// JobKindScheduled : Kind of scheduled job
|
||||
JobKindScheduled = "Scheduled"
|
||||
//JobKindPeriodic : Kind of periodic job
|
||||
// JobKindPeriodic : Kind of periodic job
|
||||
JobKindPeriodic = "Periodic"
|
||||
|
||||
//JobServiceStatusPending : job status pending
|
||||
// JobServiceStatusPending : job status pending
|
||||
JobServiceStatusPending = "Pending"
|
||||
//JobServiceStatusRunning : job status running
|
||||
// JobServiceStatusRunning : job status running
|
||||
JobServiceStatusRunning = "Running"
|
||||
//JobServiceStatusStopped : job status stopped
|
||||
// JobServiceStatusStopped : job status stopped
|
||||
JobServiceStatusStopped = "Stopped"
|
||||
//JobServiceStatusCancelled : job status cancelled
|
||||
// JobServiceStatusCancelled : job status cancelled
|
||||
JobServiceStatusCancelled = "Cancelled"
|
||||
//JobServiceStatusError : job status error
|
||||
// JobServiceStatusError : job status error
|
||||
JobServiceStatusError = "Error"
|
||||
//JobServiceStatusSuccess : job status success
|
||||
// JobServiceStatusSuccess : job status success
|
||||
JobServiceStatusSuccess = "Success"
|
||||
//JobServiceStatusScheduled : job status scheduled
|
||||
// JobServiceStatusScheduled : job status scheduled
|
||||
JobServiceStatusScheduled = "Scheduled"
|
||||
|
||||
// JobActionStop : the action to stop the job
|
||||
|
@ -2,15 +2,15 @@
|
||||
|
||||
package models
|
||||
|
||||
//Parameters for job execution.
|
||||
// Parameters for job execution.
|
||||
type Parameters map[string]interface{}
|
||||
|
||||
//JobRequest is the request of launching a job.
|
||||
// JobRequest is the request of launching a job.
|
||||
type JobRequest struct {
|
||||
Job *JobData `json:"job"`
|
||||
}
|
||||
|
||||
//JobData keeps the basic info.
|
||||
// JobData keeps the basic info.
|
||||
type JobData struct {
|
||||
Name string `json:"name"`
|
||||
Parameters Parameters `json:"parameters"`
|
||||
@ -18,7 +18,7 @@ type JobData struct {
|
||||
StatusHook string `json:"status_hook"`
|
||||
}
|
||||
|
||||
//JobMetadata stores the metadata of job.
|
||||
// JobMetadata stores the metadata of job.
|
||||
type JobMetadata struct {
|
||||
JobKind string `json:"kind"`
|
||||
ScheduleDelay uint64 `json:"schedule_delay,omitempty"`
|
||||
@ -26,12 +26,12 @@ type JobMetadata struct {
|
||||
IsUnique bool `json:"unique"`
|
||||
}
|
||||
|
||||
//JobStats keeps the result of job launching.
|
||||
// JobStats keeps the result of job launching.
|
||||
type JobStats struct {
|
||||
Stats *JobStatData `json:"job"`
|
||||
}
|
||||
|
||||
//JobStatData keeps the stats of job
|
||||
// JobStatData keeps the stats of job
|
||||
type JobStatData struct {
|
||||
JobID string `json:"id"`
|
||||
Status string `json:"status"`
|
||||
@ -49,12 +49,12 @@ type JobStatData struct {
|
||||
HookStatus string `json:"hook_status,omitempty"`
|
||||
}
|
||||
|
||||
//JobPoolStats represents the healthy and status of all the running worker pools.
|
||||
// JobPoolStats represents the healthy and status of all the running worker pools.
|
||||
type JobPoolStats struct {
|
||||
Pools []*JobPoolStatsData `json:"worker_pools"`
|
||||
}
|
||||
|
||||
//JobPoolStatsData represent the healthy and status of the worker pool.
|
||||
// JobPoolStatsData represent the healthy and status of the worker pool.
|
||||
type JobPoolStatsData struct {
|
||||
WorkerPoolID string `json:"worker_pool_id"`
|
||||
StartedAt int64 `json:"started_at"`
|
||||
@ -64,20 +64,20 @@ type JobPoolStatsData struct {
|
||||
Status string `json:"status"`
|
||||
}
|
||||
|
||||
//JobActionRequest defines for triggering job action like stop/cancel.
|
||||
// JobActionRequest defines for triggering job action like stop/cancel.
|
||||
type JobActionRequest struct {
|
||||
Action string `json:"action"`
|
||||
}
|
||||
|
||||
//JobStatusChange is designed for reporting the status change via hook.
|
||||
// JobStatusChange is designed for reporting the status change via hook.
|
||||
type JobStatusChange struct {
|
||||
JobID string `json:"job_id"`
|
||||
Status string `json:"status"`
|
||||
CheckIn string `json:"check_in,omitempty"`
|
||||
}
|
||||
|
||||
//Message is designed for sub/pub messages
|
||||
// Message is designed for sub/pub messages
|
||||
type Message struct {
|
||||
Event string
|
||||
Data interface{} //generic format
|
||||
Data interface{} // generic format
|
||||
}
|
||||
|
@ -19,7 +19,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
//AdminJobTable is table name for admin job
|
||||
// AdminJobTable is table name for admin job
|
||||
AdminJobTable = "admin_job"
|
||||
)
|
||||
|
||||
@ -36,7 +36,7 @@ type AdminJob struct {
|
||||
UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time"`
|
||||
}
|
||||
|
||||
//TableName is required by by beego orm to map AdminJob to table AdminJob
|
||||
// TableName is required by by beego orm to map AdminJob to table AdminJob
|
||||
func (a *AdminJob) TableName() string {
|
||||
return AdminJobTable
|
||||
}
|
||||
|
@ -29,12 +29,12 @@ type ClairVulnTimestamp struct {
|
||||
LastUpdateUTC int64 `orm:"-" json:"last_update"`
|
||||
}
|
||||
|
||||
//TableName is required by beego to map struct to table.
|
||||
// TableName is required by beego to map struct to table.
|
||||
func (ct *ClairVulnTimestamp) TableName() string {
|
||||
return ClairVulnTimestampTable
|
||||
}
|
||||
|
||||
//ClairLayer ...
|
||||
// ClairLayer ...
|
||||
type ClairLayer struct {
|
||||
Name string `json:"Name,omitempty"`
|
||||
NamespaceNames []string `json:"NamespaceNames,omitempty"`
|
||||
@ -45,7 +45,7 @@ type ClairLayer struct {
|
||||
Features []ClairFeature `json:"Features,omitempty"`
|
||||
}
|
||||
|
||||
//ClairFeature ...
|
||||
// ClairFeature ...
|
||||
type ClairFeature struct {
|
||||
Name string `json:"Name,omitempty"`
|
||||
NamespaceName string `json:"NamespaceName,omitempty"`
|
||||
@ -55,7 +55,7 @@ type ClairFeature struct {
|
||||
AddedBy string `json:"AddedBy,omitempty"`
|
||||
}
|
||||
|
||||
//ClairVulnerability ...
|
||||
// ClairVulnerability ...
|
||||
type ClairVulnerability struct {
|
||||
Name string `json:"Name,omitempty"`
|
||||
NamespaceName string `json:"NamespaceName,omitempty"`
|
||||
@ -67,18 +67,18 @@ type ClairVulnerability struct {
|
||||
FixedIn []ClairFeature `json:"FixedIn,omitempty"`
|
||||
}
|
||||
|
||||
//ClairError ...
|
||||
// ClairError ...
|
||||
type ClairError struct {
|
||||
Message string `json:"Message,omitempty"`
|
||||
}
|
||||
|
||||
//ClairLayerEnvelope ...
|
||||
// ClairLayerEnvelope ...
|
||||
type ClairLayerEnvelope struct {
|
||||
Layer *ClairLayer `json:"Layer,omitempty"`
|
||||
Error *ClairError `json:"Error,omitempty"`
|
||||
}
|
||||
|
||||
//ClairNotification ...
|
||||
// ClairNotification ...
|
||||
type ClairNotification struct {
|
||||
Name string `json:"Name,omitempty"`
|
||||
Created string `json:"Created,omitempty"`
|
||||
@ -91,45 +91,45 @@ type ClairNotification struct {
|
||||
New *ClairVulnerabilityWithLayers `json:"New,omitempty"`
|
||||
}
|
||||
|
||||
//ClairNotificationEnvelope ...
|
||||
// ClairNotificationEnvelope ...
|
||||
type ClairNotificationEnvelope struct {
|
||||
Notification *ClairNotification `json:"Notification,omitempty"`
|
||||
Error *ClairError `json:"Error,omitempty"`
|
||||
}
|
||||
|
||||
//ClairVulnerabilityWithLayers ...
|
||||
// ClairVulnerabilityWithLayers ...
|
||||
type ClairVulnerabilityWithLayers struct {
|
||||
Vulnerability *ClairVulnerability `json:"Vulnerability,omitempty"`
|
||||
OrderedLayersIntroducingVulnerability []ClairOrderedLayerName `json:"OrderedLayersIntroducingVulnerability,omitempty"`
|
||||
}
|
||||
|
||||
//ClairOrderedLayerName ...
|
||||
// ClairOrderedLayerName ...
|
||||
type ClairOrderedLayerName struct {
|
||||
Index int `json:"Index"`
|
||||
LayerName string `json:"LayerName"`
|
||||
}
|
||||
|
||||
//ClairVulnerabilityStatus reflects the readiness and freshness of vulnerability data in Clair,
|
||||
//which will be returned in response of systeminfo API.
|
||||
// ClairVulnerabilityStatus reflects the readiness and freshness of vulnerability data in Clair,
|
||||
// which will be returned in response of systeminfo API.
|
||||
type ClairVulnerabilityStatus struct {
|
||||
OverallUTC int64 `json:"overall_last_update,omitempty"`
|
||||
Details []ClairNamespaceTimestamp `json:"details,omitempty"`
|
||||
}
|
||||
|
||||
//ClairNamespaceTimestamp is a record to store the clairname space and the timestamp,
|
||||
//in practice different namespace in Clair maybe merged into one, e.g. ubuntu:14.04 and ubuntu:16.4 maybe merged into ubuntu and put into response.
|
||||
// ClairNamespaceTimestamp is a record to store the clairname space and the timestamp,
|
||||
// in practice different namespace in Clair maybe merged into one, e.g. ubuntu:14.04 and ubuntu:16.4 maybe merged into ubuntu and put into response.
|
||||
type ClairNamespaceTimestamp struct {
|
||||
Namespace string `json:"namespace"`
|
||||
Timestamp int64 `json:"last_update"`
|
||||
}
|
||||
|
||||
//ClairNamespace ...
|
||||
// ClairNamespace ...
|
||||
type ClairNamespace struct {
|
||||
Name string `json:"Name,omitempty"`
|
||||
VersionFormat string `json:"VersionFormat,omitempty"`
|
||||
}
|
||||
|
||||
//ClairNamespaceEnvelope ...
|
||||
// ClairNamespaceEnvelope ...
|
||||
type ClairNamespaceEnvelope struct {
|
||||
Namespaces *[]ClairNamespace `json:"Namespaces,omitempty"`
|
||||
Error *ClairError `json:"Error,omitempty"`
|
||||
|
@ -15,21 +15,21 @@
|
||||
package models
|
||||
|
||||
const (
|
||||
//JobPending ...
|
||||
// JobPending ...
|
||||
JobPending string = "pending"
|
||||
//JobRunning ...
|
||||
// JobRunning ...
|
||||
JobRunning string = "running"
|
||||
//JobError ...
|
||||
// JobError ...
|
||||
JobError string = "error"
|
||||
//JobStopped ...
|
||||
// JobStopped ...
|
||||
JobStopped string = "stopped"
|
||||
//JobFinished ...
|
||||
// JobFinished ...
|
||||
JobFinished string = "finished"
|
||||
//JobCanceled ...
|
||||
// JobCanceled ...
|
||||
JobCanceled string = "canceled"
|
||||
//JobRetrying indicate the job needs to be retried, it will be scheduled to the end of job queue by statemachine after an interval.
|
||||
// JobRetrying indicate the job needs to be retried, it will be scheduled to the end of job queue by statemachine after an interval.
|
||||
JobRetrying string = "retrying"
|
||||
//JobContinue is the status returned by statehandler to tell statemachine to move to next possible state based on trasition table.
|
||||
// JobContinue is the status returned by statehandler to tell statemachine to move to next possible state based on trasition table.
|
||||
JobContinue string = "_continue"
|
||||
// JobScheduled ...
|
||||
JobScheduled string = "scheduled"
|
||||
|
@ -45,7 +45,7 @@ type LdapUser struct {
|
||||
GroupDNList []string `json:"ldap_groupdn"`
|
||||
}
|
||||
|
||||
//LdapImportUser ...
|
||||
// LdapImportUser ...
|
||||
type LdapImportUser struct {
|
||||
LdapUIDList []string `json:"ldap_uid_list"`
|
||||
}
|
||||
|
@ -22,7 +22,7 @@ import (
|
||||
const (
|
||||
ProMetaPublic = "public"
|
||||
ProMetaEnableContentTrust = "enable_content_trust"
|
||||
ProMetaPreventVul = "prevent_vul" //prevent vulnerable images from being pulled
|
||||
ProMetaPreventVul = "prevent_vul" // prevent vulnerable images from being pulled
|
||||
ProMetaSeverity = "severity"
|
||||
ProMetaAutoScan = "auto_scan"
|
||||
SeverityNone = "negligible"
|
||||
|
@ -154,7 +154,7 @@ type BaseProjectCollection struct {
|
||||
// ProjectRequest holds informations that need for creating project API
|
||||
type ProjectRequest struct {
|
||||
Name string `json:"project_name"`
|
||||
Public *int `json:"public"` //deprecated, reserved for project creation in replication
|
||||
Public *int `json:"public"` // deprecated, reserved for project creation in replication
|
||||
Metadata map[string]string `json:"metadata"`
|
||||
}
|
||||
|
||||
@ -164,7 +164,7 @@ type ProjectQueryResult struct {
|
||||
Projects []*Project
|
||||
}
|
||||
|
||||
//TableName is required by beego orm to map Project to table project
|
||||
// TableName is required by beego orm to map Project to table project
|
||||
func (p *Project) TableName() string {
|
||||
return ProjectTable
|
||||
}
|
||||
|
@ -22,17 +22,17 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
//RepOpTransfer represents the operation of a job to transfer repository to a remote registry/harbor instance.
|
||||
// RepOpTransfer represents the operation of a job to transfer repository to a remote registry/harbor instance.
|
||||
RepOpTransfer string = "transfer"
|
||||
//RepOpDelete represents the operation of a job to remove repository from a remote registry/harbor instance.
|
||||
// RepOpDelete represents the operation of a job to remove repository from a remote registry/harbor instance.
|
||||
RepOpDelete string = "delete"
|
||||
//RepOpSchedule represents the operation of a job to schedule the real replication process
|
||||
// RepOpSchedule represents the operation of a job to schedule the real replication process
|
||||
RepOpSchedule string = "schedule"
|
||||
//RepTargetTable is the table name for replication targets
|
||||
// RepTargetTable is the table name for replication targets
|
||||
RepTargetTable = "replication_target"
|
||||
//RepJobTable is the table name for replication jobs
|
||||
// RepJobTable is the table name for replication jobs
|
||||
RepJobTable = "replication_job"
|
||||
//RepPolicyTable is table name for replication policies
|
||||
// RepPolicyTable is table name for replication policies
|
||||
RepPolicyTable = "replication_policy"
|
||||
)
|
||||
|
||||
@ -108,17 +108,17 @@ func (r *RepTarget) Valid(v *validation.Validation) {
|
||||
}
|
||||
}
|
||||
|
||||
//TableName is required by by beego orm to map RepTarget to table replication_target
|
||||
// TableName is required by by beego orm to map RepTarget to table replication_target
|
||||
func (r *RepTarget) TableName() string {
|
||||
return RepTargetTable
|
||||
}
|
||||
|
||||
//TableName is required by by beego orm to map RepJob to table replication_job
|
||||
// TableName is required by by beego orm to map RepJob to table replication_job
|
||||
func (r *RepJob) TableName() string {
|
||||
return RepJobTable
|
||||
}
|
||||
|
||||
//TableName is required by by beego orm to map RepPolicy to table replication_policy
|
||||
// TableName is required by by beego orm to map RepPolicy to table replication_policy
|
||||
func (r *RepPolicy) TableName() string {
|
||||
return RepPolicyTable
|
||||
}
|
||||
|
@ -18,7 +18,7 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
//RepoTable is the table name for repository
|
||||
// RepoTable is the table name for repository
|
||||
const RepoTable = "repository"
|
||||
|
||||
// RepoRecord holds the record of an repository in DB, all the infors are from the registry notification event.
|
||||
@ -33,7 +33,7 @@ type RepoRecord struct {
|
||||
UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time"`
|
||||
}
|
||||
|
||||
//TableName is required by by beego orm to map RepoRecord to table repository
|
||||
// TableName is required by by beego orm to map RepoRecord to table repository
|
||||
func (rp *RepoRecord) TableName() string {
|
||||
return RepoTable
|
||||
}
|
||||
|
@ -15,11 +15,11 @@
|
||||
package models
|
||||
|
||||
const (
|
||||
//PROJECTADMIN project administrator
|
||||
// PROJECTADMIN project administrator
|
||||
PROJECTADMIN = 1
|
||||
//DEVELOPER developer
|
||||
// DEVELOPER developer
|
||||
DEVELOPER = 2
|
||||
//GUEST guest
|
||||
// GUEST guest
|
||||
GUEST = 3
|
||||
)
|
||||
|
||||
|
@ -16,13 +16,13 @@ package models
|
||||
|
||||
import "time"
|
||||
|
||||
//ScanJobTable is the name of the table whose data is mapped by ScanJob struct.
|
||||
// ScanJobTable is the name of the table whose data is mapped by ScanJob struct.
|
||||
const ScanJobTable = "img_scan_job"
|
||||
|
||||
//ScanOverviewTable is the name of the table whose data is mapped by ImgScanOverview struct.
|
||||
// ScanOverviewTable is the name of the table whose data is mapped by ImgScanOverview struct.
|
||||
const ScanOverviewTable = "img_scan_overview"
|
||||
|
||||
//ScanJob is the model to represent a job for image scan in DB.
|
||||
// ScanJob is the model to represent a job for image scan in DB.
|
||||
type ScanJob struct {
|
||||
ID int64 `orm:"pk;auto;column(id)" json:"id"`
|
||||
Status string `orm:"column(status)" json:"status"`
|
||||
@ -47,7 +47,7 @@ const (
|
||||
SevHigh
|
||||
)
|
||||
|
||||
//String is the output function for sererity variable
|
||||
// String is the output function for sererity variable
|
||||
func (sev Severity) String() string {
|
||||
name := []string{"negligible", "unknown", "low", "medium", "high"}
|
||||
i := int64(sev)
|
||||
@ -59,12 +59,12 @@ func (sev Severity) String() string {
|
||||
}
|
||||
}
|
||||
|
||||
//TableName is required by by beego orm to map ScanJob to table img_scan_job
|
||||
// TableName is required by by beego orm to map ScanJob to table img_scan_job
|
||||
func (s *ScanJob) TableName() string {
|
||||
return ScanJobTable
|
||||
}
|
||||
|
||||
//ImgScanOverview mapped to a record of image scan overview.
|
||||
// ImgScanOverview mapped to a record of image scan overview.
|
||||
type ImgScanOverview struct {
|
||||
ID int64 `orm:"pk;auto;column(id)" json:"-"`
|
||||
Digest string `orm:"column(image_digest)" json:"image_digest"`
|
||||
@ -78,18 +78,18 @@ type ImgScanOverview struct {
|
||||
UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time,omitempty"`
|
||||
}
|
||||
|
||||
//TableName ...
|
||||
// TableName ...
|
||||
func (iso *ImgScanOverview) TableName() string {
|
||||
return ScanOverviewTable
|
||||
}
|
||||
|
||||
//ComponentsOverview has the total number and a list of components number of different serverity level.
|
||||
// ComponentsOverview has the total number and a list of components number of different serverity level.
|
||||
type ComponentsOverview struct {
|
||||
Total int `json:"total"`
|
||||
Summary []*ComponentsOverviewEntry `json:"summary"`
|
||||
}
|
||||
|
||||
//ComponentsOverviewEntry ...
|
||||
// ComponentsOverviewEntry ...
|
||||
type ComponentsOverviewEntry struct {
|
||||
Sev int `json:"severity"`
|
||||
Count int `json:"count"`
|
||||
@ -129,7 +129,7 @@ const (
|
||||
ScanAllDailyTime = "daily_time"
|
||||
)
|
||||
|
||||
//DefaultScanAllPolicy ...
|
||||
// DefaultScanAllPolicy ...
|
||||
var DefaultScanAllPolicy = ScanAllPolicy{
|
||||
Type: ScanAllDaily,
|
||||
Parm: map[string]interface{}{
|
||||
|
@ -14,7 +14,7 @@
|
||||
|
||||
package models
|
||||
|
||||
//UAASettings wraps the configuraations to access UAA service
|
||||
// UAASettings wraps the configuraations to access UAA service
|
||||
type UAASettings struct {
|
||||
Endpoint string
|
||||
ClientID string
|
||||
|
@ -31,8 +31,8 @@ type User struct {
|
||||
Comment string `orm:"column(comment)" json:"comment"`
|
||||
Deleted bool `orm:"column(deleted)" json:"deleted"`
|
||||
Rolename string `orm:"-" json:"role_name"`
|
||||
//if this field is named as "RoleID", beego orm can not map role_id
|
||||
//to it.
|
||||
// if this field is named as "RoleID", beego orm can not map role_id
|
||||
// to it.
|
||||
Role int `orm:"-" json:"role_id"`
|
||||
// RoleList []Role `json:"role_list"`
|
||||
HasAdminRole bool `orm:"column(sysadmin_flag)" json:"has_admin_role"`
|
||||
|
@ -29,7 +29,7 @@ type WatchItem struct {
|
||||
UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time"`
|
||||
}
|
||||
|
||||
//TableName ...
|
||||
// TableName ...
|
||||
func (w *WatchItem) TableName() string {
|
||||
return "replication_immediate_trigger"
|
||||
}
|
||||
|
@ -7,13 +7,13 @@ import (
|
||||
"github.com/goharbor/harbor/src/common/utils"
|
||||
)
|
||||
|
||||
//WatchConfigChanges is used to watch the configuration changes.
|
||||
// WatchConfigChanges is used to watch the configuration changes.
|
||||
func WatchConfigChanges(cfg map[string]interface{}) error {
|
||||
if cfg == nil {
|
||||
return errors.New("Empty configurations")
|
||||
}
|
||||
|
||||
//Currently only watch the scan all policy change.
|
||||
// Currently only watch the scan all policy change.
|
||||
if v, ok := cfg[ScanAllPolicyTopic]; ok {
|
||||
policyCfg := &models.ScanAllPolicy{}
|
||||
if err := utils.ConvertMapToStruct(policyCfg, v); err != nil {
|
||||
|
@ -1,15 +1,15 @@
package notifier

//NotificationHandler defines what operations a notification handler
//should have.
// NotificationHandler defines what operations a notification handler
// should have.
type NotificationHandler interface {
//Handle the event when it coming.
//value might be optional, it depends on usages.
// Handle the event when it coming.
// value might be optional, it depends on usages.
Handle(value interface{}) error

//IsStateful returns whether the handler is stateful or not.
//If handler is stateful, it will not be triggerred in parallel.
//Otherwise, the handler will be triggered concurrently if more
//than one same handler are matched the topics.
// IsStateful returns whether the handler is stateful or not.
// If handler is stateful, it will not be triggerred in parallel.
// Otherwise, the handler will be triggered concurrently if more
// than one same handler are matched the topics.
IsStateful() bool
}
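The interface above is all a subscriber needs to implement. A minimal, hypothetical handler that just logs the value and declares itself stateless (so the watcher may trigger it concurrently) could look like this; it is an example only, not part of Harbor.

package notifier

import "log"

// logHandler is a hypothetical example implementation.
type logHandler struct{}

// Handle records the incoming value.
func (h *logHandler) Handle(value interface{}) error {
	log.Printf("notification received: %v", value)
	return nil
}

// IsStateful returns false, so concurrent triggering is allowed.
func (h *logHandler) IsStateful() bool {
	return false
}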
@ -10,52 +10,52 @@ import (
|
||||
"github.com/goharbor/harbor/src/common/utils/log"
|
||||
)
|
||||
|
||||
//HandlerIndexer is setup the relationship between the handler type and
|
||||
//instance.
|
||||
// HandlerIndexer is setup the relationship between the handler type and
|
||||
// instance.
|
||||
type HandlerIndexer map[string]NotificationHandler
|
||||
|
||||
//Notification wraps the topic and related data value if existing.
|
||||
// Notification wraps the topic and related data value if existing.
|
||||
type Notification struct {
|
||||
//Topic of notification
|
||||
//Required
|
||||
// Topic of notification
|
||||
// Required
|
||||
Topic string
|
||||
|
||||
//Value of notification.
|
||||
//Optional
|
||||
// Value of notification.
|
||||
// Optional
|
||||
Value interface{}
|
||||
}
|
||||
|
||||
//HandlerChannel provides not only the chan itself but also the count of
|
||||
//handlers related with this chan.
|
||||
// HandlerChannel provides not only the chan itself but also the count of
|
||||
// handlers related with this chan.
|
||||
type HandlerChannel struct {
|
||||
//To indicate how many handler instances bound with this chan.
|
||||
// To indicate how many handler instances bound with this chan.
|
||||
boundCount uint32
|
||||
|
||||
//The chan for controlling concurrent executions.
|
||||
// The chan for controlling concurrent executions.
|
||||
channel chan bool
|
||||
}
|
||||
|
||||
//NotificationWatcher is defined to accept the events published
|
||||
//by the sender and match it with pre-registered notification handler
|
||||
//and then trigger the execution of the found handler.
|
||||
// NotificationWatcher is defined to accept the events published
|
||||
// by the sender and match it with pre-registered notification handler
|
||||
// and then trigger the execution of the found handler.
|
||||
type NotificationWatcher struct {
|
||||
//For handle concurrent scenario.
|
||||
// For handle concurrent scenario.
|
||||
*sync.RWMutex
|
||||
|
||||
//To keep the registered handlers in memory.
|
||||
//Each topic can register multiple handlers.
|
||||
//Each handler can bind to multiple topics.
|
||||
// To keep the registered handlers in memory.
|
||||
// Each topic can register multiple handlers.
|
||||
// Each handler can bind to multiple topics.
|
||||
handlers map[string]HandlerIndexer
|
||||
|
||||
//Keep the channels which are used to control the concurrent executions
|
||||
//of multiple stateful handlers with same type.
|
||||
// Keep the channels which are used to control the concurrent executions
|
||||
// of multiple stateful handlers with same type.
|
||||
handlerChannels map[string]*HandlerChannel
|
||||
}
|
||||
|
||||
//notificationWatcher is a default notification watcher in package level.
|
||||
// notificationWatcher is a default notification watcher in package level.
|
||||
var notificationWatcher = NewNotificationWatcher()
|
||||
|
||||
//NewNotificationWatcher is constructor of NotificationWatcher.
|
||||
// NewNotificationWatcher is constructor of NotificationWatcher.
|
||||
func NewNotificationWatcher() *NotificationWatcher {
|
||||
return &NotificationWatcher{
|
||||
new(sync.RWMutex),
|
||||
@ -64,7 +64,7 @@ func NewNotificationWatcher() *NotificationWatcher {
|
||||
}
|
||||
}
|
||||
|
||||
//Handle the related topic with the specified handler.
|
||||
// Handle the related topic with the specified handler.
|
||||
func (nw *NotificationWatcher) Handle(topic string, handler NotificationHandler) error {
|
||||
if strings.TrimSpace(topic) == "" {
|
||||
return errors.New("Empty topic is not supported")
|
||||
@ -91,11 +91,11 @@ func (nw *NotificationWatcher) Handle(topic string, handler NotificationHandler)
|
||||
}
|
||||
|
||||
if handler.IsStateful() {
|
||||
//First time
|
||||
// First time
|
||||
if handlerChan, ok := nw.handlerChannels[t]; !ok {
|
||||
nw.handlerChannels[t] = &HandlerChannel{1, make(chan bool, 1)}
|
||||
} else {
|
||||
//Already have chan, just increase count
|
||||
// Already have chan, just increase count
|
||||
handlerChan.boundCount++
|
||||
}
|
||||
}
|
||||
@ -103,9 +103,9 @@ func (nw *NotificationWatcher) Handle(topic string, handler NotificationHandler)
|
||||
return nil
|
||||
}
|
||||
|
||||
//UnHandle is to revoke the registered handler with the specified topic.
|
||||
//'handler' is optional, the type name of the handler. If it's empty value,
|
||||
//then revoke the whole topic, otherwise only revoke the specified handler.
|
||||
// UnHandle is to revoke the registered handler with the specified topic.
|
||||
// 'handler' is optional, the type name of the handler. If it's empty value,
|
||||
// then revoke the whole topic, otherwise only revoke the specified handler.
|
||||
func (nw *NotificationWatcher) UnHandle(topic string, handler string) error {
|
||||
if strings.TrimSpace(topic) == "" {
|
||||
return errors.New("Empty topic is not supported")
|
||||
@ -115,20 +115,20 @@ func (nw *NotificationWatcher) UnHandle(topic string, handler string) error {
|
||||
nw.Lock()
|
||||
|
||||
var revokeHandler = func(indexer HandlerIndexer, handlerType string) bool {
|
||||
//Find the specified one
|
||||
// Find the specified one
|
||||
if hd, existing := indexer[handlerType]; existing {
|
||||
delete(indexer, handlerType)
|
||||
if len(indexer) == 0 {
|
||||
//No handler existing, then remove topic
|
||||
// No handler existing, then remove topic
|
||||
delete(nw.handlers, topic)
|
||||
}
|
||||
|
||||
//Update channel counter or remove channel
|
||||
// Update channel counter or remove channel
|
||||
if hd.IsStateful() {
|
||||
if theChan, yes := nw.handlerChannels[handlerType]; yes {
|
||||
theChan.boundCount--
|
||||
if theChan.boundCount == 0 {
|
||||
//Empty, then remove the channel
|
||||
// Empty, then remove the channel
|
||||
delete(nw.handlerChannels, handlerType)
|
||||
}
|
||||
}
|
||||
@ -149,7 +149,7 @@ func (nw *NotificationWatcher) UnHandle(topic string, handler string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
//Revoke the specified handler.
|
||||
// Revoke the specified handler.
|
||||
if revokeHandler(indexer, handler) {
|
||||
return nil
|
||||
}
|
||||
@ -158,7 +158,7 @@ func (nw *NotificationWatcher) UnHandle(topic string, handler string) error {
|
||||
return fmt.Errorf("Failed to revoke handler %s with topic %s", handler, topic)
|
||||
}
|
||||
|
||||
//Notify that notification is coming.
|
||||
// Notify that notification is coming.
|
||||
func (nw *NotificationWatcher) Notify(notification Notification) error {
|
||||
if strings.TrimSpace(notification.Topic) == "" {
|
||||
return errors.New("Empty topic can not be notified")
|
||||
@ -180,7 +180,7 @@ func (nw *NotificationWatcher) Notify(notification Notification) error {
|
||||
handlers = append(handlers, h)
|
||||
}
|
||||
|
||||
//Trigger handlers
|
||||
// Trigger handlers
|
||||
for _, h := range handlers {
|
||||
var handlerChan chan bool
|
||||
if h.IsStateful() {
|
||||
@ -198,7 +198,7 @@ func (nw *NotificationWatcher) Notify(notification Notification) error {
|
||||
}
|
||||
}()
|
||||
if err := hd.Handle(notification.Value); err != nil {
|
||||
//Currently, we just log the error
|
||||
// Currently, we just log the error
|
||||
log.Errorf("Error occurred when triggering handler %s of topic %s: %s\n", reflect.TypeOf(hd).String(), notification.Topic, err.Error())
|
||||
} else {
|
||||
log.Infof("Handle notification with topic '%s': %#v\n", notification.Topic, notification.Value)
|
||||
@ -210,17 +210,17 @@ func (nw *NotificationWatcher) Notify(notification Notification) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
//Subscribe is a wrapper utility method for NotificationWatcher.handle()
|
||||
// Subscribe is a wrapper utility method for NotificationWatcher.handle()
|
||||
func Subscribe(topic string, handler NotificationHandler) error {
|
||||
return notificationWatcher.Handle(topic, handler)
|
||||
}
|
||||
|
||||
//UnSubscribe is a wrapper utility method for NotificationWatcher.UnHandle()
|
||||
// UnSubscribe is a wrapper utility method for NotificationWatcher.UnHandle()
|
||||
func UnSubscribe(topic string, handler string) error {
|
||||
return notificationWatcher.UnHandle(topic, handler)
|
||||
}
|
||||
|
||||
//Publish is a wrapper utility method for NotificationWatcher.notify()
|
||||
// Publish is a wrapper utility method for NotificationWatcher.notify()
|
||||
func Publish(topic string, value interface{}) error {
|
||||
return notificationWatcher.Notify(Notification{
|
||||
Topic: topic,
|
||||
|
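A short usage sketch of the package-level wrappers just defined, mirroring the flow the tests below exercise; the topic name is illustrative and the handler reuses the logHandler example from earlier, with the short wait accounting for handlers running asynchronously.

// Inside the notifier package (or with the calls prefixed by notifier.):
if err := Subscribe("topic1", &logHandler{}); err != nil {
	log.Fatal(err)
}
if err := Publish("topic1", 100); err != nil {
	log.Fatal(err)
}
<-time.After(1 * time.Second) // give the async handler a moment
_ = UnSubscribe("topic1", "")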
@ -128,7 +128,7 @@ func TestPublish(t *testing.T) {
|
||||
Publish("topic1", 100)
|
||||
Publish("topic2", 50)
|
||||
|
||||
//Waiting for async is done
|
||||
// Waiting for async is done
|
||||
<-time.After(1 * time.Second)
|
||||
|
||||
finalData := atomic.LoadInt32(&statefulData)
|
||||
@ -146,7 +146,7 @@ func TestPublish(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
//Clear stateful data.
|
||||
// Clear stateful data.
|
||||
atomic.StoreInt32(&statefulData, 0)
|
||||
}
|
||||
|
||||
@ -161,12 +161,12 @@ func TestConcurrentPublish(t *testing.T) {
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
//Publish in a short interval.
|
||||
// Publish in a short interval.
|
||||
for i := 0; i < 10; i++ {
|
||||
Publish("topic1", 100)
|
||||
}
|
||||
|
||||
//Waiting for async is done
|
||||
// Waiting for async is done
|
||||
<-time.After(1 * time.Second)
|
||||
|
||||
finalData := atomic.LoadInt32(&statefulData)
|
||||
@ -179,7 +179,7 @@ func TestConcurrentPublish(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
//Clear stateful data.
|
||||
// Clear stateful data.
|
||||
atomic.StoreInt32(&statefulData, 0)
|
||||
}
|
||||
|
||||
@ -206,7 +206,7 @@ func TestConcurrentPublishWithScanPolicyHandler(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
//Wating for everything is ready.
|
||||
// Wating for everything is ready.
|
||||
<-time.After(2 * time.Second)
|
||||
|
||||
if err := UnSubscribe("testing_topic", ""); err != nil {
|
||||
@ -218,7 +218,7 @@ func TestConcurrentPublishWithScanPolicyHandler(t *testing.T) {
|
||||
}
|
||||
|
||||
scheduler.DefaultScheduler.Stop()
|
||||
//Wating for everything is ready.
|
||||
// Wating for everything is ready.
|
||||
<-time.After(1 * time.Second)
|
||||
if scheduler.DefaultScheduler.IsRunning() {
|
||||
t.Fatal("Policy scheduler is not stopped")
|
||||
|
@ -13,35 +13,35 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
//PolicyTypeDaily specify the policy type is "daily"
|
||||
// PolicyTypeDaily specify the policy type is "daily"
|
||||
PolicyTypeDaily = "daily"
|
||||
|
||||
//PolicyTypeNone specify the policy type is "none"
|
||||
// PolicyTypeNone specify the policy type is "none"
|
||||
PolicyTypeNone = "none"
|
||||
|
||||
alternatePolicy = "Alternate Policy"
|
||||
)
|
||||
|
||||
//ScanPolicyNotification is defined for pass the policy change data.
|
||||
// ScanPolicyNotification is defined for pass the policy change data.
|
||||
type ScanPolicyNotification struct {
|
||||
//Type is used to keep the scan policy type: "none","daily" and "refresh".
|
||||
// Type is used to keep the scan policy type: "none","daily" and "refresh".
|
||||
Type string
|
||||
|
||||
//DailyTime is used when the type is 'daily', the offset with UTC time 00:00.
|
||||
// DailyTime is used when the type is 'daily', the offset with UTC time 00:00.
|
||||
DailyTime int64
|
||||
}
|
||||
|
||||
//ScanPolicyNotificationHandler is defined to handle the changes of scanning
|
||||
//policy.
|
||||
// ScanPolicyNotificationHandler is defined to handle the changes of scanning
|
||||
// policy.
|
||||
type ScanPolicyNotificationHandler struct{}
|
||||
|
||||
//IsStateful to indicate this handler is stateful.
|
||||
// IsStateful to indicate this handler is stateful.
|
||||
func (s *ScanPolicyNotificationHandler) IsStateful() bool {
|
||||
//Policy change should be done one by one.
|
||||
// Policy change should be done one by one.
|
||||
return true
|
||||
}
|
||||
|
||||
//Handle the policy change notification.
|
||||
// Handle the policy change notification.
|
||||
func (s *ScanPolicyNotificationHandler) Handle(value interface{}) error {
|
||||
if value == nil {
|
||||
return errors.New("ScanPolicyNotificationHandler can not handle nil value")
|
||||
@ -57,27 +57,27 @@ func (s *ScanPolicyNotificationHandler) Handle(value interface{}) error {
|
||||
hasScheduled := scheduler.DefaultScheduler.HasScheduled(alternatePolicy)
|
||||
if notification.Type == PolicyTypeDaily {
|
||||
if !hasScheduled {
|
||||
//Schedule a new policy.
|
||||
// Schedule a new policy.
|
||||
return schedulePolicy(notification)
|
||||
}
|
||||
|
||||
//To check and compare if the related parameter is changed.
|
||||
// To check and compare if the related parameter is changed.
|
||||
if pl := scheduler.DefaultScheduler.GetPolicy(alternatePolicy); pl != nil {
|
||||
policyCandidate := policy.NewAlternatePolicy(alternatePolicy, &policy.AlternatePolicyConfiguration{
|
||||
Duration: 24 * time.Hour,
|
||||
OffsetTime: notification.DailyTime,
|
||||
})
|
||||
if !pl.Equal(policyCandidate) {
|
||||
//Parameter changed.
|
||||
//Unschedule policy.
|
||||
// Parameter changed.
|
||||
// Unschedule policy.
|
||||
if err := scheduler.DefaultScheduler.UnSchedule(alternatePolicy); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
//Schedule a new policy.
|
||||
// Schedule a new policy.
|
||||
return schedulePolicy(notification)
|
||||
}
|
||||
//Same policy configuration, do nothing
|
||||
// Same policy configuration, do nothing
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -93,7 +93,7 @@ func (s *ScanPolicyNotificationHandler) Handle(value interface{}) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
//Schedule policy.
|
||||
// Schedule policy.
|
||||
func schedulePolicy(notification ScanPolicyNotification) error {
|
||||
schedulePolicy := policy.NewAlternatePolicy(alternatePolicy, &policy.AlternatePolicyConfiguration{
|
||||
Duration: 24 * time.Hour,
|
||||
|
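Going by the notification shape defined in this file, a caller could hand a policy change to the handler above as sketched below; the values are illustrative, and in Harbor the real trigger comes from the configuration watcher rather than direct calls like this.

// Sketch; DailyTime is the seconds offset from 00:00 UTC.
notification := ScanPolicyNotification{
	Type:      PolicyTypeDaily,
	DailyTime: 3600,
}
handler := &ScanPolicyNotificationHandler{}
if err := handler.Handle(notification); err != nil {
	log.Errorf("failed to apply scan policy change: %v", err)
}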
@ -11,7 +11,7 @@ import (
|
||||
var testingScheduler = scheduler.DefaultScheduler
|
||||
|
||||
func TestScanPolicyNotificationHandler(t *testing.T) {
|
||||
//Scheduler should be running.
|
||||
// Scheduler should be running.
|
||||
testingScheduler.Start()
|
||||
if !testingScheduler.IsRunning() {
|
||||
t.Fatal("scheduler should be running")
|
||||
@ -32,7 +32,7 @@ func TestScanPolicyNotificationHandler(t *testing.T) {
|
||||
t.Fatal("Handler does not work")
|
||||
}
|
||||
|
||||
//Policy parameter changed.
|
||||
// Policy parameter changed.
|
||||
notification2 := ScanPolicyNotification{"daily", utcTime + 7200}
|
||||
if err := handler.Handle(notification2); err != nil {
|
||||
t.Fatal(err)
|
||||
@ -63,9 +63,9 @@ func TestScanPolicyNotificationHandler(t *testing.T) {
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
//Clear
|
||||
// Clear
|
||||
testingScheduler.Stop()
|
||||
//Waiting for everything is ready.
|
||||
// Waiting for everything is ready.
|
||||
<-time.After(1 * time.Second)
|
||||
if testingScheduler.IsRunning() {
|
||||
t.Fatal("scheduler should be stopped")
|
||||
|
@ -4,8 +4,8 @@ import (
|
||||
"github.com/goharbor/harbor/src/common"
|
||||
)
|
||||
|
||||
//Define global topic names
|
||||
// Define global topic names
|
||||
const (
|
||||
//ScanAllPolicyTopic is for notifying the change of scanning all policy.
|
||||
// ScanAllPolicyTopic is for notifying the change of scanning all policy.
|
||||
ScanAllPolicyTopic = common.ScanAllPolicy
|
||||
)
|
||||
|
@ -14,52 +14,52 @@ const (
oneDay = 24 * 3600
)

//AlternatePolicyConfiguration store the related configurations for alternate policy.
// AlternatePolicyConfiguration store the related configurations for alternate policy.
type AlternatePolicyConfiguration struct {
//Duration is the interval of executing attached tasks.
//E.g: 24*3600 for daily
// Duration is the interval of executing attached tasks.
// E.g: 24*3600 for daily
// 7*24*3600 for weekly
Duration time.Duration

//An integer to indicate the the weekday of the week. Please be noted that Sunday is 7.
//Use default value 0 to indicate weekday is not set.
//To support by weekly function.
// An integer to indicate the the weekday of the week. Please be noted that Sunday is 7.
// Use default value 0 to indicate weekday is not set.
// To support by weekly function.
Weekday int8

//OffsetTime is the execution time point of each turn
//It's a number to indicate the seconds offset to the 00:00 of UTC time.
// OffsetTime is the execution time point of each turn
// It's a number to indicate the seconds offset to the 00:00 of UTC time.
OffsetTime int64
}
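A quick sketch of the daily configuration that the scan-policy handler above builds; the policy package import is assumed from the imports shown elsewhere in this change.

// Sketch: a daily policy configuration that fires at 02:00 UTC.
func dailyAt0200() *policy.AlternatePolicyConfiguration {
	return &policy.AlternatePolicyConfiguration{
		Duration:   24 * time.Hour, // run once per day
		OffsetTime: 2 * 3600,       // seconds after 00:00 UTC
		// Weekday left as 0: the weekly variant is not used here
	}
}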
|
||||
|
||||
//AlternatePolicy is a policy that repeatedly executing tasks with specified duration during a specified time scope.
|
||||
// AlternatePolicy is a policy that repeatedly executing tasks with specified duration during a specified time scope.
|
||||
type AlternatePolicy struct {
|
||||
//To sync the related operations.
|
||||
// To sync the related operations.
|
||||
*sync.RWMutex
|
||||
|
||||
//Keep the attached tasks.
|
||||
// Keep the attached tasks.
|
||||
tasks task.Store
|
||||
|
||||
//Policy configurations.
|
||||
// Policy configurations.
|
||||
config *AlternatePolicyConfiguration
|
||||
|
||||
//To indicated whether policy is enabled or not.
|
||||
// To indicated whether policy is enabled or not.
|
||||
isEnabled bool
|
||||
|
||||
//Channel used to send evaluation result signals.
|
||||
// Channel used to send evaluation result signals.
|
||||
evaluation chan bool
|
||||
|
||||
//Channel used to notify policy termination.
|
||||
// Channel used to notify policy termination.
|
||||
done chan bool
|
||||
|
||||
//Channel used to receive terminate signal.
|
||||
// Channel used to receive terminate signal.
|
||||
terminator chan bool
|
||||
|
||||
//Unique name of this policy to support multiple instances
|
||||
// Unique name of this policy to support multiple instances
|
||||
name string
|
||||
}
|
||||
|
||||
//NewAlternatePolicy is constructor of creating AlternatePolicy.
|
||||
//Accept name and configuration as parameters.
|
||||
// NewAlternatePolicy is constructor of creating AlternatePolicy.
|
||||
// Accept name and configuration as parameters.
|
||||
func NewAlternatePolicy(name string, config *AlternatePolicyConfiguration) *AlternatePolicy {
|
||||
return &AlternatePolicy{
|
||||
RWMutex: new(sync.RWMutex),
|
||||
@ -71,27 +71,27 @@ func NewAlternatePolicy(name string, config *AlternatePolicyConfiguration) *Alte
|
||||
}
|
||||
}
|
||||
|
||||
//GetConfig returns the current configuration options of this policy.
|
||||
// GetConfig returns the current configuration options of this policy.
|
||||
func (alp *AlternatePolicy) GetConfig() *AlternatePolicyConfiguration {
|
||||
return alp.config
|
||||
}
|
||||
|
||||
//Name is an implementation of same method in policy interface.
|
||||
// Name is an implementation of same method in policy interface.
|
||||
func (alp *AlternatePolicy) Name() string {
|
||||
return alp.name
|
||||
}
|
||||
|
||||
//Tasks is an implementation of same method in policy interface.
|
||||
// Tasks is an implementation of same method in policy interface.
|
||||
func (alp *AlternatePolicy) Tasks() []task.Task {
|
||||
return alp.tasks.GetTasks()
|
||||
}
|
||||
|
||||
//Done is an implementation of same method in policy interface.
|
||||
// Done is an implementation of same method in policy interface.
|
||||
func (alp *AlternatePolicy) Done() <-chan bool {
|
||||
return alp.done
|
||||
}
|
||||
|
||||
//AttachTasks is an implementation of same method in policy interface.
|
||||
// AttachTasks is an implementation of same method in policy interface.
|
||||
func (alp *AlternatePolicy) AttachTasks(tasks ...task.Task) error {
|
||||
if len(tasks) == 0 {
|
||||
return errors.New("No tasks can be attached")
|
||||
@ -102,7 +102,7 @@ func (alp *AlternatePolicy) AttachTasks(tasks ...task.Task) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
//Disable is an implementation of same method in policy interface.
|
||||
// Disable is an implementation of same method in policy interface.
|
||||
func (alp *AlternatePolicy) Disable() error {
|
||||
alp.Lock()
|
||||
if !alp.isEnabled {
|
||||
@ -110,33 +110,33 @@ func (alp *AlternatePolicy) Disable() error {
|
||||
return fmt.Errorf("Instance of policy %s is not enabled", alp.Name())
|
||||
}
|
||||
|
||||
//Set state to disabled
|
||||
// Set state to disabled
|
||||
alp.isEnabled = false
|
||||
alp.Unlock()
|
||||
|
||||
//Stop the evaluation goroutine
|
||||
// Stop the evaluation goroutine
|
||||
alp.terminator <- true
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
//Evaluate is an implementation of same method in policy interface.
|
||||
// Evaluate is an implementation of same method in policy interface.
|
||||
func (alp *AlternatePolicy) Evaluate() (<-chan bool, error) {
|
||||
//Lock for state changing
|
||||
// Lock for state changing
|
||||
defer alp.Unlock()
|
||||
alp.Lock()
|
||||
|
||||
//Check if configuration is valid
|
||||
// Check if configuration is valid
|
||||
if !alp.isValidConfig() {
|
||||
return nil, errors.New("Policy configuration is not valid")
|
||||
}
|
||||
|
||||
//Check if policy instance is still running
|
||||
// Check if policy instance is still running
|
||||
if alp.isEnabled {
|
||||
return nil, fmt.Errorf("Instance of policy %s is still running", alp.Name())
|
||||
}
|
||||
|
||||
//Keep idempotent
|
||||
// Keep idempotent
|
||||
if alp.evaluation != nil {
|
||||
return alp.evaluation, nil
|
||||
}
|
||||
@ -150,8 +150,8 @@ func (alp *AlternatePolicy) Evaluate() (<-chan bool, error) {
|
||||
)
|
||||
timeNow := time.Now().UTC()
|
||||
|
||||
//Reach the execution time point?
|
||||
//Weekday is set
|
||||
// Reach the execution time point?
|
||||
// Weekday is set
|
||||
if alp.config.Weekday > 0 {
|
||||
targetWeekday := (alp.config.Weekday + 7) % 7
|
||||
currentWeekday := timeNow.Weekday()
|
||||
@ -162,7 +162,7 @@ func (alp *AlternatePolicy) Evaluate() (<-chan bool, error) {
|
||||
waitingTime = (int64)(weekdayDiff * oneDay)
|
||||
}
|
||||
|
||||
//Time
|
||||
// Time
|
||||
utcTime := (int64)(timeNow.Hour()*3600 + timeNow.Minute()*60)
|
||||
diff := alp.config.OffsetTime - utcTime
|
||||
if waitingTime > 0 {
|
||||
@ -174,9 +174,9 @@ func (alp *AlternatePolicy) Evaluate() (<-chan bool, error) {
|
||||
}
|
||||
}
|
||||
|
||||
//Let's wait for a while
|
||||
// Let's wait for a while
|
||||
if waitingTime > 0 {
|
||||
//Wait for a while.
|
||||
// Wait for a while.
|
||||
log.Infof("Waiting for %d seconds after comparing offset %d and utc time %d\n", diff, alp.config.OffsetTime, utcTime)
|
||||
select {
|
||||
case <-time.After(time.Duration(waitingTime) * time.Second):
|
||||
@ -185,10 +185,10 @@ func (alp *AlternatePolicy) Evaluate() (<-chan bool, error) {
|
||||
}
|
||||
}
|
||||
|
||||
//Trigger the first tick.
|
||||
// Trigger the first tick.
|
||||
alp.evaluation <- true
|
||||
|
||||
//Start the ticker for repeat checking.
|
||||
// Start the ticker for repeat checking.
|
||||
tk := time.NewTicker(alp.config.Duration)
|
||||
defer func() {
|
||||
if tk != nil {
|
||||
@ -208,13 +208,13 @@ func (alp *AlternatePolicy) Evaluate() (<-chan bool, error) {
|
||||
}
|
||||
}()
|
||||
|
||||
//Enabled
|
||||
// Enabled
|
||||
alp.isEnabled = true
|
||||
|
||||
return alp.evaluation, nil
|
||||
}
|
||||
|
||||
//Equal is an implementation of same method in policy interface.
|
||||
// Equal is an implementation of same method in policy interface.
|
||||
func (alp *AlternatePolicy) Equal(p Policy) bool {
|
||||
if p == nil {
|
||||
return false
|
||||
@ -237,7 +237,7 @@ func (alp *AlternatePolicy) Equal(p Policy) bool {
|
||||
cfg.Weekday == cfg2.Weekday)
|
||||
}
|
||||
|
||||
//IsEnabled is an implementation of same method in policy interface.
|
||||
// IsEnabled is an implementation of same method in policy interface.
|
||||
func (alp *AlternatePolicy) IsEnabled() bool {
|
||||
defer alp.RUnlock()
|
||||
alp.RLock()
|
||||
@ -245,7 +245,7 @@ func (alp *AlternatePolicy) IsEnabled() bool {
|
||||
return alp.isEnabled
|
||||
}
|
||||
|
||||
//Check if the config is valid. At least it should have the configurations for supporting daily policy.
// Check if the config is valid. At least it should have the configurations for supporting daily policy.
func (alp *AlternatePolicy) isValidConfig() bool {
return alp.config != nil && alp.config.Duration > 0 && alp.config.OffsetTime >= 0
}
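Putting the pieces together, a hedged sketch of the policy lifecycle as the surrounding code implies it; in the real flow the scheduler's Watcher, shown later in this change, drives this loop, and the policy and task package imports are assumed.

// Sketch: create, enable and drain a daily AlternatePolicy.
func runDaily(t task.Task) error {
	cfg := &policy.AlternatePolicyConfiguration{Duration: 24 * time.Hour, OffsetTime: 0}
	p := policy.NewAlternatePolicy("scan_all", cfg)
	if err := p.AttachTasks(t); err != nil {
		return err
	}
	evalChan, err := p.Evaluate() // enables the policy; a second call returns the same channel
	if err != nil {
		return err
	}
	defer p.Disable() // release the evaluation goroutine on exit
	for {
		select {
		case <-evalChan: // one signal per execution turn
			for _, tk := range p.Tasks() {
				go func(tk task.Task) { _ = tk.Run() }(tk)
			}
		case <-p.Done(): // non-looping policies report completion here
			return nil
		}
	}
}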
|
@ -112,9 +112,9 @@ func TestDisablePolicy(t *testing.T) {
|
||||
if tp.Disable() != nil {
|
||||
t.Fatal("Failed to disable policy")
|
||||
}
|
||||
//Waiting for everything is stable
|
||||
// Waiting for everything is stable
|
||||
<-time.After(1 * time.Second)
|
||||
//Copy value
|
||||
// Copy value
|
||||
var copiedCounter int32
|
||||
atomic.StoreInt32(&copiedCounter, atomic.LoadInt32(&counter))
|
||||
time.Sleep(2 * time.Second)
|
||||
|
@ -4,45 +4,45 @@ import (
"github.com/goharbor/harbor/src/common/scheduler/task"
)

//Policy is an if-then logic to determine how the attached tasks should be
//executed based on the evaluation result of the defined conditions.
//E.g:
// Policy is an if-then logic to determine how the attached tasks should be
// executed based on the evaluation result of the defined conditions.
// E.g:
// Daily execute TASK between 2017/06/24 and 2018/06/23
// Execute TASK at 2017/09/01 14:30:00
//
//Each policy should have a name to identify itself.
//Please be aware that policy with no tasks will be treated as invalid.
// Each policy should have a name to identify itself.
// Please be aware that policy with no tasks will be treated as invalid.
//
type Policy interface {
//Name will return the name of the policy.
//If the policy supports multiple instances, please make sure the name is unique as an UUID.
// Name will return the name of the policy.
// If the policy supports multiple instances, please make sure the name is unique as an UUID.
Name() string

//Tasks will return the attached tasks with this policy.
// Tasks will return the attached tasks with this policy.
Tasks() []task.Task

//AttachTasks is to attach tasks to this policy
// AttachTasks is to attach tasks to this policy
AttachTasks(...task.Task) error

//Done will setup a channel for other components to check whether or not
//the policy is completed. Possibly designed for the none loop policy.
// Done will setup a channel for other components to check whether or not
// the policy is completed. Possibly designed for the none loop policy.
Done() <-chan bool

//Evaluate the policy based on its definition and return the result via
//result channel. Policy is enabled after it is evaluated.
//Make sure Evaluate is idempotent, that means one policy can be only enabled
//only once even if Evaluate is called more than one times.
// Evaluate the policy based on its definition and return the result via
// result channel. Policy is enabled after it is evaluated.
// Make sure Evaluate is idempotent, that means one policy can be only enabled
// only once even if Evaluate is called more than one times.
Evaluate() (<-chan bool, error)

//Disable the enabled policy and release all the allocated resources.
// Disable the enabled policy and release all the allocated resources.
Disable() error

//Equal will compare the two policies based on related factors if existing such as confgiuration etc.
//to determine whether the two policies are same ones or not. Please pay attention that, not every policy
//needs to support this method. If no need, please directly return false to indicate each policies are
//different.
// Equal will compare the two policies based on related factors if existing such as confgiuration etc.
// to determine whether the two policies are same ones or not. Please pay attention that, not every policy
// needs to support this method. If no need, please directly return false to indicate each policies are
// different.
Equal(p Policy) bool

//IsEnabled is to indicate whether the policy is enabled or not (disabled).
// IsEnabled is to indicate whether the policy is enabled or not (disabled).
IsEnabled() bool
}
|
||||
|
@ -6,8 +6,8 @@ import (
|
||||
"io"
|
||||
)
|
||||
|
||||
//NewUUID will generate a new UUID.
|
||||
//Code copied from https://play.golang.org/p/4FkNSiUDMg
|
||||
// NewUUID will generate a new UUID.
|
||||
// Code copied from https://play.golang.org/p/4FkNSiUDMg
|
||||
func newUUID() (string, error) {
|
||||
uuid := make([]byte, 16)
|
||||
n, err := io.ReadFull(rand.Reader, uuid)
|
||||
|
@ -22,72 +22,72 @@ const (
|
||||
statTaskFail = "Task Fail"
|
||||
)
|
||||
|
||||
//StatItem is defined for the stat metrics.
|
||||
// StatItem is defined for the stat metrics.
|
||||
type StatItem struct {
|
||||
//Metrics catalog
|
||||
// Metrics catalog
|
||||
Type string
|
||||
|
||||
//The stat value
|
||||
// The stat value
|
||||
Value uint32
|
||||
|
||||
//Attach some other info
|
||||
// Attach some other info
|
||||
Attachment interface{}
|
||||
}
|
||||
|
||||
//StatSummary is used to collect some metrics of scheduler.
|
||||
// StatSummary is used to collect some metrics of scheduler.
|
||||
type StatSummary struct {
|
||||
//Count of scheduled policy
|
||||
// Count of scheduled policy
|
||||
PolicyCount uint32
|
||||
|
||||
//Total count of tasks
|
||||
// Total count of tasks
|
||||
Tasks uint32
|
||||
|
||||
//Count of successfully complete tasks
|
||||
// Count of successfully complete tasks
|
||||
CompletedTasks uint32
|
||||
|
||||
//Count of tasks with errors
|
||||
// Count of tasks with errors
|
||||
TasksWithError uint32
|
||||
}
|
||||
|
||||
//Configuration defines configuration of Scheduler.
|
||||
// Configuration defines configuration of Scheduler.
|
||||
type Configuration struct {
|
||||
QueueSize uint8
|
||||
}
|
||||
|
||||
//Scheduler is designed for scheduling policies.
|
||||
// Scheduler is designed for scheduling policies.
|
||||
type Scheduler struct {
|
||||
//Mutex for sync controlling.
|
||||
// Mutex for sync controlling.
|
||||
*sync.RWMutex
|
||||
|
||||
//Related configuration options for scheduler.
|
||||
// Related configuration options for scheduler.
|
||||
config *Configuration
|
||||
|
||||
//Store to keep the references of scheduled policies.
|
||||
// Store to keep the references of scheduled policies.
|
||||
policies Store
|
||||
|
||||
//Queue for receiving policy scheduling request
|
||||
// Queue for receiving policy scheduling request
|
||||
scheduleQueue chan *Watcher
|
||||
|
||||
//Queue for receiving policy unscheduling request or complete signal.
|
||||
// Queue for receiving policy unscheduling request or complete signal.
|
||||
unscheduleQueue chan *Watcher
|
||||
|
||||
//Channel for receiving stat metrics.
|
||||
// Channel for receiving stat metrics.
|
||||
statChan chan *StatItem
|
||||
|
||||
//Channel for terminate scheduler damon.
|
||||
// Channel for terminate scheduler damon.
|
||||
terminateChan chan bool
|
||||
|
||||
//The stat metrics of scheduler.
|
||||
// The stat metrics of scheduler.
|
||||
stats *StatSummary
|
||||
|
||||
//To indicate whether scheduler is running or not
|
||||
// To indicate whether scheduler is running or not
|
||||
isRunning bool
|
||||
}
|
||||
|
||||
//DefaultScheduler is a default scheduler.
|
||||
// DefaultScheduler is a default scheduler.
|
||||
var DefaultScheduler = NewScheduler(nil)
|
||||
|
||||
//NewScheduler is constructor for creating a scheduler.
|
||||
// NewScheduler is constructor for creating a scheduler.
|
||||
func NewScheduler(config *Configuration) *Scheduler {
|
||||
var qSize uint8 = defaultQueueSize
|
||||
if config != nil && config.QueueSize > 0 {
|
||||
@ -118,12 +118,12 @@ func NewScheduler(config *Configuration) *Scheduler {
|
||||
}
|
||||
}
|
||||
|
||||
//Start the scheduler damon.
|
||||
// Start the scheduler damon.
|
||||
func (sch *Scheduler) Start() {
|
||||
sch.Lock()
|
||||
defer sch.Unlock()
|
||||
|
||||
//If scheduler is already running
|
||||
// If scheduler is already running
|
||||
if sch.isRunning {
|
||||
return
|
||||
}
|
||||
@ -135,32 +135,32 @@ func (sch *Scheduler) Start() {
|
||||
}
|
||||
}()
|
||||
defer func() {
|
||||
//Clear resources
|
||||
// Clear resources
|
||||
sch.policies.Clear()
|
||||
log.Infof("Policy scheduler stop at %s\n", time.Now().UTC().Format(time.RFC3339))
|
||||
}()
|
||||
for {
|
||||
select {
|
||||
case <-sch.terminateChan:
|
||||
//Exit
|
||||
// Exit
|
||||
return
|
||||
case wt := <-sch.scheduleQueue:
|
||||
//If status is stopped, no requests should be served
|
||||
// If status is stopped, no requests should be served
|
||||
if !sch.IsRunning() {
|
||||
continue
|
||||
}
|
||||
go func(watcher *Watcher) {
|
||||
if watcher != nil && watcher.p != nil {
|
||||
//Enable it.
|
||||
// Enable it.
|
||||
watcher.Start()
|
||||
|
||||
//Update stats and log info.
|
||||
// Update stats and log info.
|
||||
log.Infof("Policy %s is scheduled", watcher.p.Name())
|
||||
sch.statChan <- &StatItem{statSchedulePolicy, 1, nil}
|
||||
}
|
||||
}(wt)
|
||||
case wt := <-sch.unscheduleQueue:
|
||||
//If status is stopped, no requests should be served
|
||||
// If status is stopped, no requests should be served
|
||||
if !sch.IsRunning() {
|
||||
continue
|
||||
}
|
||||
@ -168,14 +168,14 @@ func (sch *Scheduler) Start() {
|
||||
if watcher != nil && watcher.IsRunning() {
|
||||
watcher.Stop()
|
||||
|
||||
//Update stats and log info.
|
||||
// Update stats and log info.
|
||||
log.Infof("Policy %s is unscheduled", watcher.p.Name())
|
||||
sch.statChan <- &StatItem{statUnSchedulePolicy, 1, nil}
|
||||
}
|
||||
}(wt)
|
||||
case stat := <-sch.statChan:
|
||||
{
|
||||
//If status is stopped, no requests should be served
|
||||
// If status is stopped, no requests should be served
|
||||
if !sch.IsRunning() {
|
||||
continue
|
||||
}
|
||||
@ -218,12 +218,12 @@ func (sch *Scheduler) Start() {
|
||||
log.Infof("Policy scheduler start at %s\n", time.Now().UTC().Format(time.RFC3339))
|
||||
}
|
||||
|
||||
//Stop the scheduler damon.
|
||||
// Stop the scheduler damon.
|
||||
func (sch *Scheduler) Stop() {
|
||||
//Lock for state changing
|
||||
// Lock for state changing
|
||||
sch.Lock()
|
||||
|
||||
//Check if the scheduler is running
|
||||
// Check if the scheduler is running
|
||||
if !sch.isRunning {
|
||||
sch.Unlock()
|
||||
return
|
||||
@ -232,11 +232,11 @@ func (sch *Scheduler) Stop() {
|
||||
sch.isRunning = false
|
||||
sch.Unlock()
|
||||
|
||||
//Terminate damon to stop receiving signals.
|
||||
// Terminate damon to stop receiving signals.
|
||||
sch.terminateChan <- true
|
||||
}
|
||||
|
||||
//Schedule and enable the policy.
// Schedule and enable the policy.
func (sch *Scheduler) Schedule(scheduledPolicy policy.Policy) error {
if scheduledPolicy == nil {
return errors.New("nil is not Policy object")
@ -251,38 +251,38 @@ func (sch *Scheduler) Schedule(scheduledPolicy policy.Policy) error {
return errors.New("Policy must attach task(s)")
}

//Try to schedule the policy.
//Keep the policy for future use after it's successfully scheduled.
// Try to schedule the policy.
// Keep the policy for future use after it's successfully scheduled.
watcher := NewWatcher(scheduledPolicy, sch.statChan, sch.unscheduleQueue)
if err := sch.policies.Put(scheduledPolicy.Name(), watcher); err != nil {
return err
}

//Schedule the policy
// Schedule the policy
sch.scheduleQueue <- watcher

return nil
}

//UnSchedule the specified policy from the enabled policies list.
// UnSchedule the specified policy from the enabled policies list.
func (sch *Scheduler) UnSchedule(policyName string) error {
if strings.TrimSpace(policyName) == "" {
return errors.New("Empty policy name is invalid")
}

//Find the watcher.
// Find the watcher.
watcher := sch.policies.Remove(policyName)
if watcher == nil {
return fmt.Errorf("Policy %s is not existing", policyName)
}

//Unschedule the policy.
// Unschedule the policy.
sch.unscheduleQueue <- watcher

return nil
}
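A short sketch of how callers such as the scan-policy handler earlier in this change drive the default scheduler; the task argument is a placeholder, and the package names follow the qualified calls shown above.

// Sketch: (re)schedule a named daily policy on the default scheduler.
func scheduleScanAll(t task.Task) error {
	scheduler.DefaultScheduler.Start() // no-op if the daemon loop is already running

	p := policy.NewAlternatePolicy("scan_all", &policy.AlternatePolicyConfiguration{
		Duration:   24 * time.Hour,
		OffsetTime: 3 * 3600, // 03:00 UTC
	})
	if err := p.AttachTasks(t); err != nil {
		return err
	}

	// Drop any previously scheduled instance before re-scheduling.
	if scheduler.DefaultScheduler.HasScheduled("scan_all") {
		if err := scheduler.DefaultScheduler.UnSchedule("scan_all"); err != nil {
			return err
		}
	}
	return scheduler.DefaultScheduler.Schedule(p)
}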
|
||||
|
||||
//IsRunning to indicate whether the scheduler is running.
|
||||
// IsRunning to indicate whether the scheduler is running.
|
||||
func (sch *Scheduler) IsRunning() bool {
|
||||
sch.RLock()
|
||||
defer sch.RUnlock()
|
||||
@ -290,12 +290,12 @@ func (sch *Scheduler) IsRunning() bool {
|
||||
return sch.isRunning
|
||||
}
|
||||
|
||||
//HasScheduled is to check whether the given policy has been scheduled or not.
|
||||
// HasScheduled is to check whether the given policy has been scheduled or not.
|
||||
func (sch *Scheduler) HasScheduled(policyName string) bool {
|
||||
return sch.policies.Exists(policyName)
|
||||
}
|
||||
|
||||
//GetPolicy is used to get related policy reference by its name.
|
||||
// GetPolicy is used to get related policy reference by its name.
|
||||
func (sch *Scheduler) GetPolicy(policyName string) policy.Policy {
|
||||
wk := sch.policies.Get(policyName)
|
||||
if wk != nil {
|
||||
@ -305,7 +305,7 @@ func (sch *Scheduler) GetPolicy(policyName string) policy.Policy {
|
||||
return nil
|
||||
}
|
||||
|
||||
//PolicyCount returns the count of currently scheduled policies in the scheduler.
|
||||
// PolicyCount returns the count of currently scheduled policies in the scheduler.
|
||||
func (sch *Scheduler) PolicyCount() uint32 {
|
||||
return sch.policies.Size()
|
||||
}
|
||||
|
@ -7,46 +7,46 @@ import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
//Store define the basic operations for storing and managing policy watcher.
|
||||
// Store define the basic operations for storing and managing policy watcher.
|
||||
type Store interface {
|
||||
//Put a new policy in.
|
||||
// Put a new policy in.
|
||||
Put(key string, value *Watcher) error
|
||||
|
||||
//Get the corresponding policy with the key.
|
||||
// Get the corresponding policy with the key.
|
||||
Get(key string) *Watcher
|
||||
|
||||
//Exists is to check if the key existing in the store.
|
||||
// Exists is to check if the key existing in the store.
|
||||
Exists(key string) bool
|
||||
|
||||
//Remove the specified policy and return its reference.
|
||||
// Remove the specified policy and return its reference.
|
||||
Remove(key string) *Watcher
|
||||
|
||||
//Size return the total count of items in store.
|
||||
// Size return the total count of items in store.
|
||||
Size() uint32
|
||||
|
||||
//GetAll is to get all the items in the store.
|
||||
// GetAll is to get all the items in the store.
|
||||
GetAll() []*Watcher
|
||||
|
||||
//Clear store.
|
||||
// Clear store.
|
||||
Clear()
|
||||
}
|
||||
|
||||
//DefaultStore implements Store interface to keep the scheduled policies.
|
||||
//Not support concurrent sync.
|
||||
// DefaultStore implements Store interface to keep the scheduled policies.
|
||||
// Not support concurrent sync.
|
||||
type DefaultStore struct {
|
||||
//Support sync locking
|
||||
// Support sync locking
|
||||
*sync.RWMutex
|
||||
|
||||
//Map used to keep the policy list.
|
||||
// Map used to keep the policy list.
|
||||
data map[string]*Watcher
|
||||
}
|
||||
|
||||
//NewDefaultStore is used to create a new store and return the pointer reference.
|
||||
// NewDefaultStore is used to create a new store and return the pointer reference.
|
||||
func NewDefaultStore() *DefaultStore {
|
||||
return &DefaultStore{new(sync.RWMutex), make(map[string]*Watcher)}
|
||||
}
|
||||
|
||||
//Put a policy into store.
|
||||
// Put a policy into store.
|
||||
func (cs *DefaultStore) Put(key string, value *Watcher) error {
|
||||
if strings.TrimSpace(key) == "" || value == nil {
|
||||
return errors.New("Bad arguments")
|
||||
@ -64,7 +64,7 @@ func (cs *DefaultStore) Put(key string, value *Watcher) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
//Get policy via key.
|
||||
// Get policy via key.
|
||||
func (cs *DefaultStore) Get(key string) *Watcher {
|
||||
if strings.TrimSpace(key) == "" {
|
||||
return nil
|
||||
@ -76,7 +76,7 @@ func (cs *DefaultStore) Get(key string) *Watcher {
|
||||
return cs.data[key]
|
||||
}
|
||||
|
||||
//Exists is used to check whether or not the key exists in store.
|
||||
// Exists is used to check whether or not the key exists in store.
|
||||
func (cs *DefaultStore) Exists(key string) bool {
|
||||
if strings.TrimSpace(key) == "" {
|
||||
return false
|
||||
@ -90,7 +90,7 @@ func (cs *DefaultStore) Exists(key string) bool {
|
||||
return ok
|
||||
}
|
||||
|
||||
//Remove is to delete the specified policy.
|
||||
// Remove is to delete the specified policy.
|
||||
func (cs *DefaultStore) Remove(key string) *Watcher {
|
||||
if strings.TrimSpace(key) == "" {
|
||||
return nil
|
||||
@ -107,7 +107,7 @@ func (cs *DefaultStore) Remove(key string) *Watcher {
|
||||
return nil
|
||||
}
|
||||
|
||||
//Size return the total count of items in store.
|
||||
// Size return the total count of items in store.
|
||||
func (cs *DefaultStore) Size() uint32 {
|
||||
cs.RLock()
|
||||
defer cs.RUnlock()
|
||||
@ -115,7 +115,7 @@ func (cs *DefaultStore) Size() uint32 {
|
||||
return (uint32)(len(cs.data))
|
||||
}
|
||||
|
||||
//GetAll to get all the items of store.
|
||||
// GetAll to get all the items of store.
|
||||
func (cs *DefaultStore) GetAll() []*Watcher {
|
||||
cs.RLock()
|
||||
defer cs.RUnlock()
|
||||
@ -129,7 +129,7 @@ func (cs *DefaultStore) GetAll() []*Watcher {
|
||||
return all
|
||||
}
|
||||
|
||||
//Clear all the items in store.
|
||||
// Clear all the items in store.
|
||||
func (cs *DefaultStore) Clear() {
|
||||
cs.Lock()
|
||||
defer cs.Unlock()
|
||||
|
@ -88,7 +88,7 @@ func (ft *fakeTask) Number() int32 {
|
||||
return atomic.LoadInt32(&(ft.number))
|
||||
}
|
||||
|
||||
//Wacher will be tested together with scheduler.
|
||||
// Wacher will be tested together with scheduler.
|
||||
func TestScheduler(t *testing.T) {
|
||||
DefaultScheduler.Start()
|
||||
if DefaultScheduler.policies.Size() != 0 {
|
||||
|
@ -6,24 +6,24 @@ import (
|
||||
"github.com/goharbor/harbor/src/replication/event/topic"
|
||||
)
|
||||
|
||||
//Task is the task for triggering one replication
|
||||
// Task is the task for triggering one replication
|
||||
type Task struct {
|
||||
PolicyID int64
|
||||
}
|
||||
|
||||
//NewTask is constructor of creating ReplicationTask
|
||||
// NewTask is constructor of creating ReplicationTask
|
||||
func NewTask(policyID int64) *Task {
|
||||
return &Task{
|
||||
PolicyID: policyID,
|
||||
}
|
||||
}
|
||||
|
||||
//Name returns the name of this task
|
||||
// Name returns the name of this task
|
||||
func (t *Task) Name() string {
|
||||
return "replication"
|
||||
}
|
||||
|
||||
//Run the actions here
|
||||
// Run the actions here
|
||||
func (t *Task) Run() error {
|
||||
return notifier.Publish(topic.StartReplicationTopic, notification.StartReplicationNotification{
|
||||
PolicyID: t.PolicyID,
|
||||
|
@ -4,20 +4,20 @@ import (
|
||||
"github.com/goharbor/harbor/src/ui/utils"
|
||||
)
|
||||
|
||||
//ScanAllTask is task of scanning all tags.
|
||||
// ScanAllTask is task of scanning all tags.
|
||||
type ScanAllTask struct{}
|
||||
|
||||
//NewScanAllTask is constructor of creating ScanAllTask.
|
||||
// NewScanAllTask is constructor of creating ScanAllTask.
|
||||
func NewScanAllTask() *ScanAllTask {
|
||||
return &ScanAllTask{}
|
||||
}
|
||||
|
||||
//Name returns the name of the task.
|
||||
// Name returns the name of the task.
|
||||
func (sat *ScanAllTask) Name() string {
|
||||
return "scan all"
|
||||
}
|
||||
|
||||
//Run the actions.
|
||||
// Run the actions.
|
||||
func (sat *ScanAllTask) Run() error {
|
||||
return utils.ScanAllImages()
|
||||
}
|
||||
|
@ -1,10 +1,10 @@
package task

//Task is used to synchronously run specific action(s).
// Task is used to synchronously run specific action(s).
type Task interface {
//Name should return the name of the task.
// Name should return the name of the task.
Name() string

//Run the concrete code here
// Run the concrete code here
Run() error
}
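For completeness, a minimal implementation of this interface in the style of ScanAllTask above; the garbage-collection task here is purely hypothetical.

// Sketch: a hypothetical task satisfying the Task interface.
type gcTask struct{}

// Name returns the task name reported in scheduler stats and logs.
func (g *gcTask) Name() string { return "garbage collection" }

// Run does the synchronous work; a non-nil error is reported by the
// watcher as a "Task Fail" stat item.
func (g *gcTask) Run() error {
	return nil
}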
|
@ -4,30 +4,30 @@ import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
//Store is designed to keep the tasks.
|
||||
// Store is designed to keep the tasks.
|
||||
type Store interface {
|
||||
//GetTasks return the current existing list in store.
|
||||
// GetTasks return the current existing list in store.
|
||||
GetTasks() []Task
|
||||
|
||||
//AddTasks is used to append tasks to the list.
|
||||
// AddTasks is used to append tasks to the list.
|
||||
AddTasks(tasks ...Task)
|
||||
}
|
||||
|
||||
//DefaultStore is the default implemetation of Store interface.
|
||||
// DefaultStore is the default implemetation of Store interface.
|
||||
type DefaultStore struct {
|
||||
//To sync the related operations.
|
||||
// To sync the related operations.
|
||||
*sync.RWMutex
|
||||
|
||||
//The space to keep the tasks.
|
||||
// The space to keep the tasks.
|
||||
tasks []Task
|
||||
}
|
||||
|
||||
//NewDefaultStore is constructor method for DefaultStore.
|
||||
// NewDefaultStore is constructor method for DefaultStore.
|
||||
func NewDefaultStore() *DefaultStore {
|
||||
return &DefaultStore{new(sync.RWMutex), []Task{}}
|
||||
}
|
||||
|
||||
//GetTasks implements the same method in Store interface.
|
||||
// GetTasks implements the same method in Store interface.
|
||||
func (ds *DefaultStore) GetTasks() []Task {
|
||||
copyList := []Task{}
|
||||
|
||||
@ -41,9 +41,9 @@ func (ds *DefaultStore) GetTasks() []Task {
|
||||
return copyList
|
||||
}
|
||||
|
||||
//AddTasks implements the same method in Store interface.
|
||||
// AddTasks implements the same method in Store interface.
|
||||
func (ds *DefaultStore) AddTasks(tasks ...Task) {
|
||||
//Double confirm.
|
||||
// Double confirm.
|
||||
if ds.tasks == nil {
|
||||
ds.tasks = []Task{}
|
||||
}
|
||||
|
@ -9,28 +9,28 @@ import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
//Watcher is an asynchronous runner to provide an evaluation environment for the policy.
|
||||
// Watcher is an asynchronous runner to provide an evaluation environment for the policy.
|
||||
type Watcher struct {
|
||||
//Locker to sync related operations.
|
||||
// Locker to sync related operations.
|
||||
*sync.RWMutex
|
||||
|
||||
//The target policy.
|
||||
// The target policy.
|
||||
p policy.Policy
|
||||
|
||||
//The channel for receive stop signal.
|
||||
// The channel for receive stop signal.
|
||||
cmdChan chan bool
|
||||
|
||||
//Indicate whether the watcher is started and running.
|
||||
// Indicate whether the watcher is started and running.
|
||||
isRunning bool
|
||||
|
||||
//Report stats to scheduler.
|
||||
// Report stats to scheduler.
|
||||
stats chan *StatItem
|
||||
|
||||
//If policy is automatically completed, report the policy to scheduler.
|
||||
// If policy is automatically completed, report the policy to scheduler.
|
||||
doneChan chan *Watcher
|
||||
}
|
||||
|
||||
//NewWatcher is used as a constructor.
|
||||
// NewWatcher is used as a constructor.
|
||||
func NewWatcher(p policy.Policy, st chan *StatItem, done chan *Watcher) *Watcher {
|
||||
return &Watcher{
|
||||
RWMutex: new(sync.RWMutex),
|
||||
@ -42,9 +42,9 @@ func NewWatcher(p policy.Policy, st chan *StatItem, done chan *Watcher) *Watcher
|
||||
}
|
||||
}
|
||||
|
||||
//Start the running.
|
||||
// Start the running.
|
||||
func (wc *Watcher) Start() {
|
||||
//Lock for state changing
|
||||
// Lock for state changing
|
||||
wc.Lock()
|
||||
defer wc.Unlock()
|
||||
|
||||
@ -74,13 +74,13 @@ func (wc *Watcher) Start() {
|
||||
select {
|
||||
case <-evalChan:
|
||||
{
|
||||
//If worker is not running, should not response any requests.
|
||||
// If worker is not running, should not response any requests.
|
||||
if !wc.IsRunning() {
|
||||
continue
|
||||
}
|
||||
|
||||
log.Infof("Receive evaluation signal from policy '%s'\n", pl.Name())
|
||||
//Start to run the attached tasks.
|
||||
// Start to run the attached tasks.
|
||||
for _, t := range pl.Tasks() {
|
||||
go func(tk task.Task) {
|
||||
defer func() {
|
||||
@ -93,7 +93,7 @@ func (wc *Watcher) Start() {
|
||||
}()
|
||||
err := tk.Run()
|
||||
|
||||
//Report task execution stats.
|
||||
// Report task execution stats.
|
||||
st := &StatItem{statTaskComplete, 1, err}
|
||||
if err != nil {
|
||||
st.Type = statTaskFail
|
||||
@ -103,7 +103,7 @@ func (wc *Watcher) Start() {
|
||||
}
|
||||
}(t)
|
||||
|
||||
//Report task run stats.
|
||||
// Report task run stats.
|
||||
st := &StatItem{statTaskRun, 1, nil}
|
||||
if wc.stats != nil {
|
||||
wc.stats <- st
|
||||
@ -112,8 +112,8 @@ func (wc *Watcher) Start() {
|
||||
}
|
||||
case <-done:
|
||||
{
|
||||
//Policy is automatically completed.
|
||||
//Report policy change stats.
|
||||
// Policy is automatically completed.
|
||||
// Report policy change stats.
|
||||
if wc.doneChan != nil {
|
||||
wc.doneChan <- wc
|
||||
}
|
||||
@ -121,7 +121,7 @@ func (wc *Watcher) Start() {
|
||||
return
|
||||
}
|
||||
case <-wc.cmdChan:
|
||||
//Exit goroutine.
|
||||
// Exit goroutine.
|
||||
return
|
||||
}
|
||||
}
|
||||
@ -130,9 +130,9 @@ func (wc *Watcher) Start() {
|
||||
wc.isRunning = true
|
||||
}
|
||||
|
||||
//Stop the running.
|
||||
// Stop the running.
|
||||
func (wc *Watcher) Stop() {
|
||||
//Lock for state changing
|
||||
// Lock for state changing
|
||||
wc.Lock()
|
||||
if !wc.isRunning {
|
||||
wc.Unlock()
|
||||
@ -142,18 +142,18 @@ func (wc *Watcher) Stop() {
|
||||
wc.isRunning = false
|
||||
wc.Unlock()
|
||||
|
||||
//Disable policy.
|
||||
// Disable policy.
|
||||
if wc.p != nil {
|
||||
wc.p.Disable()
|
||||
}
|
||||
|
||||
//Stop watcher.
|
||||
// Stop watcher.
|
||||
wc.cmdChan <- true
|
||||
|
||||
log.Infof("Worker for policy %s is stopped.\n", wc.p.Name())
|
||||
}
|
||||
|
||||
//IsRunning to indicate if the watcher is still running.
|
||||
// IsRunning to indicate if the watcher is still running.
|
||||
func (wc *Watcher) IsRunning() bool {
|
||||
wc.RLock()
|
||||
defer wc.RUnlock()
|
||||
|
@ -20,12 +20,12 @@ import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
//HeaderPrefix is the prefix of the value of Authorization header.
//It has the space.
// HeaderPrefix is the prefix of the value of Authorization header.
// It has the space.
const HeaderPrefix = "Harbor-Secret "

//FromRequest tries to get Harbor Secret from request header.
//It will return empty string if the reqeust is nil.
// FromRequest tries to get Harbor Secret from request header.
// It will return empty string if the reqeust is nil.
func FromRequest(req *http.Request) string {
if req == nil {
return ""
@ -37,7 +37,7 @@ func FromRequest(req *http.Request) string {
return ""
}
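A hedged sketch showing the two helpers together (AddToRequest is defined just below); the URL is illustrative and the snippet assumes it lives in the same package as these functions.

// Sketch: attach the job-service secret to an outgoing request and read it back.
func callWithSecret(secretValue string) (*http.Request, error) {
	req, err := http.NewRequest(http.MethodGet, "http://harbor-core/api/ping", nil)
	if err != nil {
		return nil, err
	}
	// Presumably sets "Authorization: Harbor-Secret <secret>" on the request.
	if err := AddToRequest(req, secretValue); err != nil {
		return nil, err
	}
	got := FromRequest(req) // strips HeaderPrefix and returns the raw secret
	_ = got
	return req, nil
}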

//AddToRequest add the secret to request
// AddToRequest add the secret to request
func AddToRequest(req *http.Request, secret string) error {
if req == nil {
return fmt.Errorf("input request is nil, unable to set secret")
|
||||
|
@ -34,8 +34,8 @@ type Context interface {
|
||||
HasWritePerm(projectIDOrName interface{}) bool
|
||||
// HasAllPerm returns whether the user has all permissions to the project
|
||||
HasAllPerm(projectIDOrName interface{}) bool
|
||||
//Get current user's all project
|
||||
// Get current user's all project
|
||||
GetMyProjects() ([]*models.Project, error)
|
||||
//Get user's role in provided project
|
||||
// Get user's role in provided project
|
||||
GetProjectRoles(projectIDOrName interface{}) []int
|
||||
}
|
||||
|
@ -184,11 +184,11 @@ func (s *SecurityContext) GetRolesByGroup(projectIDOrName interface{}) []int {
|
||||
var roles []int
|
||||
user := s.user
|
||||
project, err := s.pm.Get(projectIDOrName)
|
||||
//No user, group or project info
|
||||
// No user, group or project info
|
||||
if err != nil || project == nil || user == nil || len(user.GroupList) == 0 {
|
||||
return roles
|
||||
}
|
||||
//Get role by LDAP group
|
||||
// Get role by LDAP group
|
||||
groupDNConditions := group.GetGroupDNQueryCondition(user.GroupList)
|
||||
roles, err = dao.GetRolesByLDAPGroup(project.ProjectID, groupDNConditions)
|
||||
if err != nil {
|
||||
|
@ -28,7 +28,7 @@ func TestIsAuthenticated(t *testing.T) {
|
||||
isAuthenticated := context.IsAuthenticated()
|
||||
assert.False(t, isAuthenticated)
|
||||
|
||||
//invalid secret
|
||||
// invalid secret
|
||||
context = NewSecurityContext("invalid_secret",
|
||||
secret.NewStore(map[string]string{
|
||||
"secret": "username",
|
||||
@ -36,7 +36,7 @@ func TestIsAuthenticated(t *testing.T) {
|
||||
isAuthenticated = context.IsAuthenticated()
|
||||
assert.False(t, isAuthenticated)
|
||||
|
||||
//valid secret
|
||||
// valid secret
|
||||
context = NewSecurityContext("secret",
|
||||
secret.NewStore(map[string]string{
|
||||
"secret": "username",
|
||||
@ -51,7 +51,7 @@ func TestGetUsername(t *testing.T) {
|
||||
username := context.GetUsername()
|
||||
assert.Equal(t, "", username)
|
||||
|
||||
//invalid secret
|
||||
// invalid secret
|
||||
context = NewSecurityContext("invalid_secret",
|
||||
secret.NewStore(map[string]string{
|
||||
"secret": "username",
|
||||
@ -59,7 +59,7 @@ func TestGetUsername(t *testing.T) {
|
||||
username = context.GetUsername()
|
||||
assert.Equal(t, "", username)
|
||||
|
||||
//valid secret
|
||||
// valid secret
|
||||
context = NewSecurityContext("secret",
|
||||
secret.NewStore(map[string]string{
|
||||
"secret": "username",
|
||||
@ -101,7 +101,7 @@ func TestHasReadPerm(t *testing.T) {
|
||||
hasReadPerm := context.HasReadPerm("project_name")
|
||||
assert.False(t, hasReadPerm)
|
||||
|
||||
//invalid secret
|
||||
// invalid secret
|
||||
context = NewSecurityContext("invalid_secret",
|
||||
secret.NewStore(map[string]string{
|
||||
"jobservice_secret": secret.JobserviceUser,
|
||||
@ -109,7 +109,7 @@ func TestHasReadPerm(t *testing.T) {
|
||||
hasReadPerm = context.HasReadPerm("project_name")
|
||||
assert.False(t, hasReadPerm)
|
||||
|
||||
//valid secret, project name
|
||||
// valid secret, project name
|
||||
context = NewSecurityContext("jobservice_secret",
|
||||
secret.NewStore(map[string]string{
|
||||
"jobservice_secret": secret.JobserviceUser,
|
||||
@ -117,7 +117,7 @@ func TestHasReadPerm(t *testing.T) {
|
||||
hasReadPerm = context.HasReadPerm("project_name")
|
||||
assert.True(t, hasReadPerm)
|
||||
|
||||
//valid secret, project ID
|
||||
// valid secret, project ID
|
||||
hasReadPerm = context.HasReadPerm(1)
|
||||
assert.True(t, hasReadPerm)
|
||||
}
|
||||
@ -163,7 +163,7 @@ func TestGetMyProjects(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGetProjectRoles(t *testing.T) {
|
||||
//invalid secret
|
||||
// invalid secret
|
||||
context := NewSecurityContext("invalid_secret",
|
||||
secret.NewStore(map[string]string{
|
||||
"jobservice_secret": secret.JobserviceUser,
|
||||
|
@ -30,7 +30,7 @@ import (
|
||||
// Client communicates with clair endpoint to scan image and get detailed scan result
|
||||
type Client struct {
|
||||
endpoint string
|
||||
//need to customize the logger to write output to job log.
|
||||
// need to customize the logger to write output to job log.
|
||||
logger *log.Logger
|
||||
client *http.Client
|
||||
}
|
||||
|
@ -22,7 +22,7 @@ import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
//var client = NewClient()
|
||||
// var client = NewClient()
|
||||
|
||||
// ParseClairSev parse the severity of clair to Harbor's Severity type if the string is not recognized the value will be set to unknown.
|
||||
func ParseClairSev(clairSev string) models.Severity {
|
||||
@ -94,7 +94,7 @@ func transformVuln(clairVuln *models.ClairLayerEnvelope) (*models.ComponentsOver
|
||||
}, overallSev
|
||||
}
|
||||
|
||||
//TransformVuln is for running scanning job in both job service V1 and V2.
// TransformVuln is for running scanning job in both job service V1 and V2.
func TransformVuln(clairVuln *models.ClairLayerEnvelope) (*models.ComponentsOverview, models.Severity) {
return transformVuln(clairVuln)
}
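A brief sketch of both helpers in use once a scan result has been fetched from Clair; the clair package alias and the harbor log helper import are assumed.

// Sketch: summarize a Clair layer envelope.
func summarize(env *models.ClairLayerEnvelope) {
	overview, worst := clair.TransformVuln(env)
	log.Infof("components overview: %v, worst severity: %v", overview, worst)

	// Map a raw Clair severity string to Harbor's Severity type.
	_ = clair.ParseClairSev("High")
}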
|
||||
|
@ -120,7 +120,7 @@ func newClient(addr, identity, username, password string,
|
||||
return nil, err
|
||||
}
|
||||
|
||||
//try to swith to SSL/TLS
|
||||
// try to swith to SSL/TLS
|
||||
if !tls {
|
||||
if ok, _ := client.Extension("STARTTLS"); ok {
|
||||
log.Debugf("switching the connection with %s to SSL/TLS ...", addr)
|
||||
|
@ -38,9 +38,9 @@ func TestSend(t *testing.T) {
|
||||
err := Send(addr, identity, username, password,
|
||||
timeout, tls, insecure, from, to,
|
||||
subject, message)
|
||||
//bypass the check due to securty policy change on gmail
|
||||
//TODO
|
||||
//assert.Nil(t, err)
|
||||
// bypass the check due to securty policy change on gmail
|
||||
// TODO
|
||||
// assert.Nil(t, err)
|
||||
|
||||
/*not work on travis
|
||||
// non-tls connection
|
||||
@ -52,7 +52,7 @@ func TestSend(t *testing.T) {
|
||||
assert.Nil(t, err)
|
||||
*/
|
||||
|
||||
//invalid username/password
|
||||
// invalid username/password
|
||||
username = "invalid_username"
|
||||
err = Send(addr, identity, username, password,
|
||||
timeout, tls, insecure, from, to,
|
||||
@ -78,9 +78,9 @@ func TestPing(t *testing.T) {
|
||||
// tls connection
|
||||
err := Ping(addr, identity, username, password,
|
||||
timeout, tls, insecure)
|
||||
//bypass the check due to securty policy change on gmail
|
||||
//TODO
|
||||
//assert.Nil(t, err)
|
||||
// bypass the check due to securty policy change on gmail
|
||||
// TODO
|
||||
// assert.Nil(t, err)
|
||||
|
||||
/*not work on travis
|
||||
// non-tls connection
|
||||
@ -91,7 +91,7 @@ func TestPing(t *testing.T) {
|
||||
assert.Nil(t, err)
|
||||
*/
|
||||
|
||||
//invalid username/password
|
||||
// invalid username/password
|
||||
username = "invalid_username"
|
||||
err = Ping(addr, identity, username, password,
|
||||
timeout, tls, insecure)
|
||||
|
@ -65,7 +65,7 @@ func ReversibleDecrypt(str, key string) (string, error) {
|
||||
str = str[len(EncryptHeaderV1):]
|
||||
return decryptAES(str, key)
|
||||
}
|
||||
//fallback to base64
|
||||
// fallback to base64
|
||||
return decodeB64(str)
|
||||
}
|
||||
|
||||
|
@ -30,20 +30,20 @@ import (
|
||||
goldap "gopkg.in/ldap.v2"
|
||||
)
|
||||
|
||||
//ErrNotFound ...
|
||||
// ErrNotFound ...
|
||||
var ErrNotFound = errors.New("entity not found")
|
||||
|
||||
//ErrDNSyntax ...
|
||||
// ErrDNSyntax ...
|
||||
var ErrDNSyntax = errors.New("Invalid DN syntax")
|
||||
|
||||
//Session - define a LDAP session
|
||||
// Session - define a LDAP session
|
||||
type Session struct {
|
||||
ldapConfig models.LdapConf
|
||||
ldapGroupConfig models.LdapGroupConf
|
||||
ldapConn *goldap.Conn
|
||||
}
|
||||
|
||||
//LoadSystemLdapConfig - load LDAP configure from adminserver
|
||||
// LoadSystemLdapConfig - load LDAP configure from adminserver
|
||||
func LoadSystemLdapConfig() (*Session, error) {
|
||||
|
||||
authMode, err := config.AuthMode()
|
||||
@ -71,7 +71,7 @@ func LoadSystemLdapConfig() (*Session, error) {
|
||||
return CreateWithAllConfig(*ldapConf, *ldapGroupConfig)
|
||||
}
|
||||
|
||||
//CreateWithConfig -
|
||||
// CreateWithConfig -
|
||||
func CreateWithConfig(ldapConf models.LdapConf) (*Session, error) {
|
||||
return CreateWithAllConfig(ldapConf, models.LdapGroupConf{})
|
||||
}
|
||||
@ -140,7 +140,7 @@ func formatURL(ldapURL string) (string, error) {
|
||||
|
||||
}
|
||||
|
||||
//ConnectionTest - test ldap session connection with system default setting
|
||||
// ConnectionTest - test ldap session connection with system default setting
|
||||
func (session *Session) ConnectionTest() error {
|
||||
session, err := LoadSystemLdapConfig()
|
||||
if err != nil {
|
||||
@ -150,12 +150,12 @@ func (session *Session) ConnectionTest() error {
|
||||
return ConnectionTestWithAllConfig(session.ldapConfig, session.ldapGroupConfig)
|
||||
}
|
||||
|
||||
//ConnectionTestWithConfig -
|
||||
// ConnectionTestWithConfig -
|
||||
func ConnectionTestWithConfig(ldapConfig models.LdapConf) error {
|
||||
return ConnectionTestWithAllConfig(ldapConfig, models.LdapGroupConf{})
|
||||
}
|
||||
|
||||
//ConnectionTestWithAllConfig - test ldap session connection, out of the scope of normal session create/close
|
||||
// ConnectionTestWithAllConfig - test ldap session connection, out of the scope of normal session create/close
|
||||
func ConnectionTestWithAllConfig(ldapConfig models.LdapConf, ldapGroupConfig models.LdapGroupConf) error {
|
||||
|
||||
authMode, err := config.AuthMode()
|
||||
@ -164,7 +164,7 @@ func ConnectionTestWithAllConfig(ldapConfig models.LdapConf, ldapGroupConfig mod
|
||||
return err
|
||||
}
|
||||
|
||||
//If no password present, use the system default password
|
||||
// If no password present, use the system default password
|
||||
if ldapConfig.LdapSearchPassword == "" && authMode == "ldap_auth" {
|
||||
|
||||
session, err := LoadSystemLdapConfig()
|
||||
@ -199,7 +199,7 @@ func ConnectionTestWithAllConfig(ldapConfig models.LdapConf, ldapGroupConfig mod
|
||||
return nil
|
||||
}
|
||||
|
||||
//SearchUser - search LDAP user by name
|
||||
// SearchUser - search LDAP user by name
|
||||
func (session *Session) SearchUser(username string) ([]models.LdapUser, error) {
|
||||
var ldapUsers []models.LdapUser
|
||||
ldapFilter := session.createUserFilter(username)
|
||||
@ -213,7 +213,7 @@ func (session *Session) SearchUser(username string) ([]models.LdapUser, error) {
|
||||
var u models.LdapUser
|
||||
groupDNList := []string{}
|
||||
for _, attr := range ldapEntry.Attributes {
|
||||
//OpenLdap sometimes contain leading space in useranme
|
||||
// OpenLdap sometimes contain leading space in useranme
|
||||
val := strings.TrimSpace(attr.Values[0])
|
||||
log.Debugf("Current ldap entry attr name: %s\n", attr.Name)
|
||||
switch strings.ToLower(attr.Name) {
|
||||
@ -249,7 +249,7 @@ func (session *Session) Bind(dn string, password string) error {
|
||||
return session.ldapConn.Bind(dn, password)
|
||||
}
|
||||
|
||||
//Open - open Session
|
||||
// Open - open Session
|
||||
func (session *Session) Open() error {
|
||||
|
||||
splitLdapURL := strings.Split(session.ldapConfig.LdapURL, "://")
|
||||
@ -305,9 +305,9 @@ func (session *Session) SearchLdapAttribute(baseDN, filter string, attributes []
|
||||
baseDN,
|
||||
session.ldapConfig.LdapScope,
|
||||
goldap.NeverDerefAliases,
|
||||
0, //Unlimited results
|
||||
0, //Search Timeout
|
||||
false, //Types only
|
||||
0, // Unlimited results
|
||||
0, // Search Timeout
|
||||
false, // Types only
|
||||
filter,
|
||||
attributes,
|
||||
nil,
|
||||
@ -329,7 +329,7 @@ func (session *Session) SearchLdapAttribute(baseDN, filter string, attributes []
|
||||
|
||||
}
|
||||
|
||||
//CreateUserFilter - create filter to search user with specified username
|
||||
// CreateUserFilter - create filter to search user with specified username
|
||||
func (session *Session) createUserFilter(username string) string {
|
||||
var filterTag string
|
||||
|
||||
@ -353,14 +353,14 @@ func (session *Session) createUserFilter(username string) string {
|
||||
return ldapFilter
|
||||
}
|
||||
|
||||
//Close - close current session
// Close - close current session
func (session *Session) Close() {
if session.ldapConn != nil {
session.ldapConn.Close()
}
}
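A sketch of a one-off lookup using the session functions above; the models import path is assumed, and the remaining LdapConf fields (search DN, password, filters) are omitted rather than guessed.

// Sketch: search a user with an explicit LDAP configuration.
func lookupUser(username string) ([]models.LdapUser, error) {
	cfg := models.LdapConf{LdapURL: "ldaps://ldap.example.com"} // other fields omitted here
	session, err := CreateWithConfig(cfg)
	if err != nil {
		return nil, err
	}
	if err := session.Open(); err != nil {
		return nil, err
	}
	defer session.Close()
	return session.SearchUser(username)
}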
|
||||
|
||||
//SearchGroupByName ...
|
||||
// SearchGroupByName ...
|
||||
func (session *Session) SearchGroupByName(groupName string) ([]models.LdapGroup, error) {
|
||||
return session.searchGroup(session.ldapGroupConfig.LdapGroupBaseDN,
|
||||
session.ldapGroupConfig.LdapGroupFilter,
|
||||
@ -368,7 +368,7 @@ func (session *Session) SearchGroupByName(groupName string) ([]models.LdapGroup,
|
||||
session.ldapGroupConfig.LdapGroupNameAttribute)
|
||||
}
|
||||
|
||||
//SearchGroupByDN ...
|
||||
// SearchGroupByDN ...
|
||||
func (session *Session) SearchGroupByDN(groupDN string) ([]models.LdapGroup, error) {
|
||||
if _, err := goldap.ParseDN(groupDN); err != nil {
|
||||
return nil, ErrDNSyntax
|
||||
@ -396,7 +396,7 @@ func (session *Session) searchGroup(baseDN, filter, groupName, groupNameAttribut
|
||||
var group models.LdapGroup
|
||||
group.GroupDN = ldapEntry.DN
|
||||
for _, attr := range ldapEntry.Attributes {
|
||||
//OpenLdap sometimes contain leading space in useranme
|
||||
// OpenLdap sometimes contain leading space in useranme
|
||||
val := strings.TrimSpace(attr.Values[0])
|
||||
log.Debugf("Current ldap entry attr name: %s\n", attr.Name)
|
||||
switch strings.ToLower(attr.Name) {
|
||||
|
@ -23,7 +23,7 @@ var adminServerLdapTestConfig = map[string]interface{}{
|
||||
common.PostGreSQLUsername: "postgres",
|
||||
common.PostGreSQLPassword: "root123",
|
||||
common.PostGreSQLDatabase: "registry",
|
||||
//config.SelfRegistration: true,
|
||||
// config.SelfRegistration: true,
|
||||
common.LDAPURL: "ldap://127.0.0.1",
|
||||
common.LDAPSearchDN: "cn=admin,dc=example,dc=com",
|
||||
common.LDAPSearchPwd: "admin",
|
||||
|
@ -64,12 +64,12 @@ func New(out io.Writer, fmtter Formatter, lvl Level) *Logger {
|
||||
}
|
||||
}
|
||||
|
||||
//DefaultLogger returns the default logger within the pkg, i.e. the one used in log.Infof....
|
||||
// DefaultLogger returns the default logger within the pkg, i.e. the one used in log.Infof....
|
||||
func DefaultLogger() *Logger {
|
||||
return logger
|
||||
}
|
||||
|
||||
//SetOutput sets the output of Logger l
|
||||
// SetOutput sets the output of Logger l
|
||||
func (l *Logger) SetOutput(out io.Writer) {
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
@ -77,7 +77,7 @@ func (l *Logger) SetOutput(out io.Writer) {
|
||||
l.out = out
|
||||
}
|
||||
|
||||
//SetFormatter sets the formatter of Logger l
|
||||
// SetFormatter sets the formatter of Logger l
|
||||
func (l *Logger) SetFormatter(fmtter Formatter) {
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
@ -85,7 +85,7 @@ func (l *Logger) SetFormatter(fmtter Formatter) {
|
||||
l.fmtter = fmtter
|
||||
}
|
||||
|
||||
//SetLevel sets the level of Logger l
|
||||
// SetLevel sets the level of Logger l
|
||||
func (l *Logger) SetLevel(lvl Level) {
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
@ -93,17 +93,17 @@ func (l *Logger) SetLevel(lvl Level) {
|
||||
l.lvl = lvl
|
||||
}
|
||||
|
||||
//SetOutput sets the output of default Logger
// SetOutput sets the output of default Logger
func SetOutput(out io.Writer) {
logger.SetOutput(out)
}

//SetFormatter sets the formatter of default Logger
// SetFormatter sets the formatter of default Logger
func SetFormatter(fmtter Formatter) {
logger.SetFormatter(fmtter)
}

//SetLevel sets the level of default Logger
// SetLevel sets the level of default Logger
func SetLevel(lvl Level) {
logger.SetLevel(lvl)
}
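A small sketch of redirecting the package-level logger, for example to write a job log as the clair Client comment earlier in this change mentions; the log import path is assumed.

// Sketch: point the default logger (used by log.Infof/Debugf) at another writer.
func redirectLogs(w io.Writer) {
	log.SetOutput(w)        // package-level helper shown above
	_ = log.DefaultLogger() // the underlying *Logger, if per-instance tuning is needed
}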
|
@ -46,7 +46,7 @@ var (
|
||||
type Target struct {
|
||||
Tag string `json:"tag"`
|
||||
Hashes data.Hashes `json:"hashes"`
|
||||
//TODO: update fields as needed.
|
||||
// TODO: update fields as needed.
|
||||
}
|
||||
|
||||
func init() {
|
||||
@ -102,7 +102,7 @@ func GetTargets(notaryEndpoint string, username string, fqRepo string) ([]Target
|
||||
} else if err != nil {
|
||||
return res, err
|
||||
}
|
||||
//Remove root.json such that when remote repository is removed the local cache can't be reused.
|
||||
// Remove root.json such that when remote repository is removed the local cache can't be reused.
|
||||
rootJSON := path.Join(notaryCachePath, "tuf", fqRepo, "metadata/root.json")
|
||||
rmErr := os.Remove(rootJSON)
|
||||
if rmErr != nil {
|
||||
|
@ -30,7 +30,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
latency int = 10 //second, the network latency when token is received
|
||||
latency int = 10 // second, the network latency when token is received
|
||||
scheme = "bearer"
|
||||
)
|
||||
|
||||
@ -51,7 +51,7 @@ type tokenAuthorizer struct {
|
||||
|
||||
// add token to the request
|
||||
func (t *tokenAuthorizer) Modify(req *http.Request) error {
|
||||
//only handle requests sent to registry
|
||||
// only handle requests sent to registry
|
||||
goon, err := t.filterReq(req)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -110,7 +110,7 @@ func (r *Registry) Catalog() ([]string, error) {
|
||||
}
|
||||
|
||||
repos = append(repos, catalogResp.Repositories...)
|
||||
//Link: </v2/_catalog?last=library%2Fhello-world-25&n=100>; rel="next"
|
||||
// Link: </v2/_catalog?last=library%2Fhello-world-25&n=100>; rel="next"
|
||||
link := resp.Header.Get("Link")
|
||||
if strings.HasSuffix(link, `rel="next"`) && strings.Index(link, "<") >= 0 && strings.Index(link, ">") >= 0 {
|
||||
suffix = link[strings.Index(link, "<")+1 : strings.Index(link, ">")]
|
||||
|
@ -29,35 +29,35 @@ var (
|
||||
once sync.Once
|
||||
)
|
||||
|
||||
//TimeMarker is used to control an action not to be taken frequently within the interval
|
||||
// TimeMarker is used to control an action not to be taken frequently within the interval
|
||||
type TimeMarker struct {
|
||||
sync.RWMutex
|
||||
next time.Time
|
||||
interval time.Duration
|
||||
}
|
||||
|
||||
//Mark tries to mark a future time, which is after the duration of interval from the time it's called.
// Mark tries to mark a future time, which is after the duration of interval from the time it's called.
func (t *TimeMarker) Mark() {
t.Lock()
defer t.Unlock()
t.next = time.Now().Add(t.interval)
}

//Check returns true if the current time is after the mark by this marker, and the caction the mark guards and be taken.
// Check returns true if the current time is after the mark by this marker, and the caction the mark guards and be taken.
func (t *TimeMarker) Check() bool {
t.RLock()
defer t.RUnlock()
return time.Now().After(t.next)
}

//Next returns the time of the next mark.
// Next returns the time of the next mark.
func (t *TimeMarker) Next() time.Time {
t.RLock()
defer t.RUnlock()
return t.next
}
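A sketch of the intended use, gating an action behind the marker; it assumes the same package as TimeMarker and uses ScanAllMarker defined just below.

// Sketch: only allow the guarded action once per configured interval.
func maybeScanAll(doScan func() error) error {
	m := ScanAllMarker()
	if !m.Check() {
		return fmt.Errorf("scan all was requested too recently, next window at %s",
			m.Next().Format(time.RFC3339))
	}
	m.Mark() // push the next allowed time out by the interval
	return doScan()
}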
|
||||
|
||||
//ScanAllMarker ...
|
||||
// ScanAllMarker ...
|
||||
func ScanAllMarker() *TimeMarker {
|
||||
once.Do(func() {
|
||||
a := os.Getenv("HARBOR_SCAN_ALL_INTERVAL")
|
||||
@ -74,7 +74,7 @@ func ScanAllMarker() *TimeMarker {
|
||||
return scanAllMarker
|
||||
}
|
||||
|
||||
//ScanOverviewMarker ...
|
||||
// ScanOverviewMarker ...
|
||||
func ScanOverviewMarker() *TimeMarker {
|
||||
return scanOverviewMarker
|
||||
}
|
||||
|
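TimeMarker above is a small throttle: Check reports whether the interval since the last Mark has elapsed, and Mark pushes the next allowed time forward. A self-contained usage sketch (the type is re-declared locally, under the assumption that the real one is exactly the struct shown in the hunk):

package main

import (
	"fmt"
	"sync"
	"time"
)

// marker mirrors the TimeMarker shown above so the sketch compiles on its own.
type marker struct {
	sync.RWMutex
	next     time.Time
	interval time.Duration
}

func (m *marker) Mark() {
	m.Lock()
	defer m.Unlock()
	m.next = time.Now().Add(m.interval)
}

func (m *marker) Check() bool {
	m.RLock()
	defer m.RUnlock()
	return time.Now().After(m.next)
}

func main() {
	m := &marker{interval: 100 * time.Millisecond}
	m.Mark()
	fmt.Println("right after Mark:", m.Check()) // false, still inside the interval
	time.Sleep(150 * time.Millisecond)
	fmt.Println("after the interval:", m.Check()) // true, the guarded action may run again
}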

@ -31,13 +31,13 @@ import (
)

const (
//TokenURLSuffix ...
// TokenURLSuffix ...
TokenURLSuffix = "/oauth/token"
//AuthURLSuffix ...
// AuthURLSuffix ...
AuthURLSuffix = "/oauth/authorize"
//UserInfoURLSuffix ...
// UserInfoURLSuffix ...
UserInfoURLSuffix = "/userinfo"
//UsersURLSuffix ...
// UsersURLSuffix ...
UsersURLSuffix = "/Users"
)

@ -45,13 +45,13 @@ var uaaTransport = &http.Transport{}

// Client provides funcs to interact with UAA.
type Client interface {
//PasswordAuth accepts username and password, return a token if it's valid.
// PasswordAuth accepts username and password, return a token if it's valid.
PasswordAuth(username, password string) (*oauth2.Token, error)
//GetUserInfoByToken send the token to OIDC endpoint to get user info, currently it's also used to validate the token.
// GetUserInfoByToken send the token to OIDC endpoint to get user info, currently it's also used to validate the token.
GetUserInfo(token string) (*UserInfo, error)
//SearchUser searches a user based on user name.
// SearchUser searches a user based on user name.
SearchUser(name string) ([]*SearchUserEntry, error)
//UpdateConfig updates the config of the current client
// UpdateConfig updates the config of the current client
UpdateConfig(cfg *ClientConfig) error
}

@ -61,7 +61,7 @@ type ClientConfig struct {
ClientSecret string
Endpoint string
SkipTLSVerify bool
//Absolut path for CA root used to communicate with UAA, only effective when skipTLSVerify set to false.
// Absolut path for CA root used to communicate with UAA, only effective when skipTLSVerify set to false.
CARootPath string
}

@ -76,13 +76,13 @@ type UserInfo struct {
Email string `json:"email"`
}

//SearchUserEmailEntry ...
// SearchUserEmailEntry ...
type SearchUserEmailEntry struct {
Value string `json:"value"`
Primary bool `json:"primary"`
}

//SearchUserEntry is the struct of an entry of user within search result.
// SearchUserEntry is the struct of an entry of user within search result.
type SearchUserEntry struct {
ID string `json:"id"`
ExtID string `json:"externalId"`

@ -91,7 +91,7 @@ type SearchUserEntry struct {
Groups []interface{}
}

//SearchUserRes is the struct to parse the result of search user API of UAA
// SearchUserRes is the struct to parse the result of search user API of UAA
type SearchUserRes struct {
Resources []*SearchUserEntry `json:"resources"`
TotalResults int `json:"totalResults"`

@ -104,7 +104,7 @@ type defaultClient struct {
oauth2Cfg *oauth2.Config
twoLegCfg *clientcredentials.Config
endpoint string
//TODO: add public key, etc...
// TODO: add public key, etc...
}

func (dc *defaultClient) PasswordAuth(username, password string) (*oauth2.Token, error) {

@ -190,7 +190,7 @@ func (dc *defaultClient) UpdateConfig(cfg *ClientConfig) error {
return err
}
pool := x509.NewCertPool()
//Do not throw error if the certificate is malformed, so we can put a place holder.
// Do not throw error if the certificate is malformed, so we can put a place holder.
if ok := pool.AppendCertsFromPEM(content); !ok {
log.Warningf("Failed to append certificate to cert pool, cert path: %s", cfg.CARootPath)
} else {

@ -202,7 +202,7 @@ func (dc *defaultClient) UpdateConfig(cfg *ClientConfig) error {
}
uaaTransport.TLSClientConfig = tc
dc.httpClient.Transport = uaaTransport
//dc.httpClient.Transport = transport.
// dc.httpClient.Transport = transport.

oc := &oauth2.Config{
ClientID: cfg.ClientID,

@ -99,7 +99,7 @@ func TestNewClientWithCACert(t *testing.T) {
}
_, err := NewDefaultClient(cfg)
assert.Nil(err)
//Skip if it's malformed.
// Skip if it's malformed.
cfg.CARootPath = path.Join(currPath(), "test", "non-ca.pem")
_, err = NewDefaultClient(cfg)
assert.Nil(err)
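The UpdateConfig hunk above deliberately tolerates a malformed CA file: an AppendCertsFromPEM failure is logged as a warning so a placeholder certificate does not break the client. A sketch of that tolerant loading using only the standard library (the path and the use of the standard log package are assumptions of this sketch, not the code above):

package main

import (
	"crypto/tls"
	"crypto/x509"
	"log"
	"os"
)

// loadRootCAs builds a root CA pool from caPath but degrades to a warning
// when the file is missing or not valid PEM, mirroring the hunk above.
func loadRootCAs(caPath string) *tls.Config {
	pool := x509.NewCertPool()
	content, err := os.ReadFile(caPath)
	if err != nil {
		log.Printf("read CA root failed: %v", err)
	} else if ok := pool.AppendCertsFromPEM(content); !ok {
		log.Printf("failed to append certificate to cert pool, cert path: %s", caPath)
	}
	return &tls.Config{RootCAs: pool}
}

func main() {
	_ = loadRootCAs("/path/to/ca.pem") // placeholder path for illustration
}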

@ -128,7 +128,7 @@ func ParseTimeStamp(timestamp string) (*time.Time, error) {
return &t, nil
}

//ConvertMapToStruct is used to fill the specified struct with map.
// ConvertMapToStruct is used to fill the specified struct with map.
func ConvertMapToStruct(object interface{}, values interface{}) error {
if object == nil {
return errors.New("nil struct is not supported")

@ -168,7 +168,7 @@ func ParseProjectIDOrName(value interface{}) (int64, string, error) {
return id, name, nil
}

//SafeCastString -- cast a object to string saftely
// SafeCastString -- cast a object to string saftely
func SafeCastString(value interface{}) string {
if result, ok := value.(string); ok {
return result

@ -176,7 +176,7 @@ func SafeCastString(value interface{}) string {
return ""
}

//SafeCastInt --
// SafeCastInt --
func SafeCastInt(value interface{}) int {
if result, ok := value.(int); ok {
return result

@ -184,7 +184,7 @@ func SafeCastInt(value interface{}) int {
return 0
}

//SafeCastBool --
// SafeCastBool --
func SafeCastBool(value interface{}) bool {
if result, ok := value.(bool); ok {
return result

@ -192,7 +192,7 @@ func SafeCastBool(value interface{}) bool {
return false
}

//SafeCastFloat64 --
// SafeCastFloat64 --
func SafeCastFloat64(value interface{}) float64 {
if result, ok := value.(float64); ok {
return result

@ -121,7 +121,7 @@ func TestReversibleEncrypt(t *testing.T) {
if decrypted != password {
t.Errorf("decrypted password: %s, is not identical to original", decrypted)
}
//Test b64 for backward compatibility
// Test b64 for backward compatibility
b64password := base64.StdEncoding.EncodeToString([]byte(password))
decrypted, err = ReversibleDecrypt(b64password, key)
if err != nil {

@ -17,22 +17,22 @@ const (
authHeader = "Authorization"
)

//Authenticator defined behaviors of doing auth checking.
// Authenticator defined behaviors of doing auth checking.
type Authenticator interface {
//Auth incoming request
// Auth incoming request
//
//req *http.Request: the incoming request
// req *http.Request: the incoming request
//
//Returns:
// Returns:
// nil returned if successfully done
// otherwise an error returned
DoAuth(req *http.Request) error
}

//SecretAuthenticator implements interface 'Authenticator' based on simple secret.
// SecretAuthenticator implements interface 'Authenticator' based on simple secret.
type SecretAuthenticator struct{}

//DoAuth implements same method in interface 'Authenticator'.
// DoAuth implements same method in interface 'Authenticator'.
func (sa *SecretAuthenticator) DoAuth(req *http.Request) error {
if req == nil {
return errors.New("nil request")

@ -48,7 +48,7 @@ func (sa *SecretAuthenticator) DoAuth(req *http.Request) error {
}

secret := strings.TrimSpace(strings.TrimPrefix(h, secretPrefix))
//incase both two are empty
// incase both two are empty
if utils.IsEmptyStr(secret) {
return errors.New("empty secret is not allowed")
}
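SecretAuthenticator.DoAuth above reads the Authorization header, strips a secret prefix and rejects empty values. The sketch below reproduces that shape; the "Harbor-Secret " prefix is an assumption for illustration only, since the real secretPrefix constant is not part of the hunks shown:

package main

import (
	"errors"
	"fmt"
	"net/http"
	"strings"
)

// Assumed prefix; the real secretPrefix is defined outside the shown hunks.
const secretPrefix = "Harbor-Secret "

func checkSecret(req *http.Request) error {
	if req == nil {
		return errors.New("nil request")
	}
	h := strings.TrimSpace(req.Header.Get("Authorization"))
	if !strings.HasPrefix(h, secretPrefix) {
		return fmt.Errorf("header %q is malformed", h)
	}
	// in case both prefix and secret are empty
	if secret := strings.TrimSpace(strings.TrimPrefix(h, secretPrefix)); secret == "" {
		return errors.New("empty secret is not allowed")
	}
	return nil
}

func main() {
	req, _ := http.NewRequest(http.MethodGet, "http://localhost/api/v1/stats", nil)
	req.Header.Set("Authorization", secretPrefix+"s3cr3t")
	fmt.Println(checkSecret(req)) // <nil>
}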

@ -18,37 +18,37 @@ import (
"github.com/goharbor/harbor/src/jobservice/opm"
)

//Handler defines approaches to handle the http requests.
// Handler defines approaches to handle the http requests.
type Handler interface {
//HandleLaunchJobReq is used to handle the job submission request.
// HandleLaunchJobReq is used to handle the job submission request.
HandleLaunchJobReq(w http.ResponseWriter, req *http.Request)

//HandleGetJobReq is used to handle the job stats query request.
// HandleGetJobReq is used to handle the job stats query request.
HandleGetJobReq(w http.ResponseWriter, req *http.Request)

//HandleJobActionReq is used to handle the job action requests (stop/retry).
// HandleJobActionReq is used to handle the job action requests (stop/retry).
HandleJobActionReq(w http.ResponseWriter, req *http.Request)

//HandleCheckStatusReq is used to handle the job service healthy status checking request.
// HandleCheckStatusReq is used to handle the job service healthy status checking request.
HandleCheckStatusReq(w http.ResponseWriter, req *http.Request)

//HandleJobLogReq is used to handle the request of getting job logs
// HandleJobLogReq is used to handle the request of getting job logs
HandleJobLogReq(w http.ResponseWriter, req *http.Request)
}

//DefaultHandler is the default request handler which implements the Handler interface.
// DefaultHandler is the default request handler which implements the Handler interface.
type DefaultHandler struct {
controller core.Interface
}

//NewDefaultHandler is constructor of DefaultHandler.
// NewDefaultHandler is constructor of DefaultHandler.
func NewDefaultHandler(ctl core.Interface) *DefaultHandler {
return &DefaultHandler{
controller: ctl,
}
}

//HandleLaunchJobReq is implementation of method defined in interface 'Handler'
// HandleLaunchJobReq is implementation of method defined in interface 'Handler'
func (dh *DefaultHandler) HandleLaunchJobReq(w http.ResponseWriter, req *http.Request) {
if !dh.preCheck(w) {
return

@ -60,14 +60,14 @@ func (dh *DefaultHandler) HandleLaunchJobReq(w http.ResponseWriter, req *http.Re
return
}

//unmarshal data
// unmarshal data
jobReq := models.JobRequest{}
if err = json.Unmarshal(data, &jobReq); err != nil {
dh.handleError(w, http.StatusInternalServerError, errs.HandleJSONDataError(err))
return
}

//Pass request to the controller for the follow-up.
// Pass request to the controller for the follow-up.
jobStats, err := dh.controller.LaunchJob(jobReq)
if err != nil {
dh.handleError(w, http.StatusInternalServerError, errs.LaunchJobError(err))

@ -83,7 +83,7 @@ func (dh *DefaultHandler) HandleLaunchJobReq(w http.ResponseWriter, req *http.Re
w.Write(data)
}

//HandleGetJobReq is implementation of method defined in interface 'Handler'
// HandleGetJobReq is implementation of method defined in interface 'Handler'
func (dh *DefaultHandler) HandleGetJobReq(w http.ResponseWriter, req *http.Request) {
if !dh.preCheck(w) {
return

@ -113,7 +113,7 @@ func (dh *DefaultHandler) HandleGetJobReq(w http.ResponseWriter, req *http.Reque
w.Write(data)
}

//HandleJobActionReq is implementation of method defined in interface 'Handler'
// HandleJobActionReq is implementation of method defined in interface 'Handler'
func (dh *DefaultHandler) HandleJobActionReq(w http.ResponseWriter, req *http.Request) {
if !dh.preCheck(w) {
return

@ -128,7 +128,7 @@ func (dh *DefaultHandler) HandleJobActionReq(w http.ResponseWriter, req *http.Re
return
}

//unmarshal data
// unmarshal data
jobActionReq := models.JobActionRequest{}
if err = json.Unmarshal(data, &jobActionReq); err != nil {
dh.handleError(w, http.StatusInternalServerError, errs.HandleJSONDataError(err))

@ -174,10 +174,10 @@ func (dh *DefaultHandler) HandleJobActionReq(w http.ResponseWriter, req *http.Re
return
}

w.WriteHeader(http.StatusNoContent) //only header, no content returned
w.WriteHeader(http.StatusNoContent) // only header, no content returned
}

//HandleCheckStatusReq is implementation of method defined in interface 'Handler'
// HandleCheckStatusReq is implementation of method defined in interface 'Handler'
func (dh *DefaultHandler) HandleCheckStatusReq(w http.ResponseWriter, req *http.Request) {
if !dh.preCheck(w) {
return

@ -198,7 +198,7 @@ func (dh *DefaultHandler) HandleCheckStatusReq(w http.ResponseWriter, req *http.
w.Write(data)
}

//HandleJobLogReq is implementation of method defined in interface 'Handler'
// HandleJobLogReq is implementation of method defined in interface 'Handler'
func (dh *DefaultHandler) HandleJobLogReq(w http.ResponseWriter, req *http.Request) {
if !dh.preCheck(w) {
return

@ -15,26 +15,26 @@ const (
apiVersion = "v1"
)

//Router defines the related routes for the job service and directs the request
//to the right handler method.
// Router defines the related routes for the job service and directs the request
// to the right handler method.
type Router interface {
//ServeHTTP used to handle the http requests
// ServeHTTP used to handle the http requests
ServeHTTP(w http.ResponseWriter, req *http.Request)
}

//BaseRouter provides the basic routes for the job service based on the golang http server mux.
// BaseRouter provides the basic routes for the job service based on the golang http server mux.
type BaseRouter struct {
//Use mux to keep the routes mapping.
// Use mux to keep the routes mapping.
router *mux.Router

//Handler used to handle the requests
// Handler used to handle the requests
handler Handler

//Do auth
// Do auth
authenticator Authenticator
}

//NewBaseRouter is the constructor of BaseRouter.
// NewBaseRouter is the constructor of BaseRouter.
func NewBaseRouter(handler Handler, authenticator Authenticator) Router {
br := &BaseRouter{
router: mux.NewRouter(),

@ -42,15 +42,15 @@ func NewBaseRouter(handler Handler, authenticator Authenticator) Router {
authenticator: authenticator,
}

//Register routes here
// Register routes here
br.registerRoutes()

return br
}

//ServeHTTP is the implementation of Router interface.
// ServeHTTP is the implementation of Router interface.
func (br *BaseRouter) ServeHTTP(w http.ResponseWriter, req *http.Request) {
//Do auth
// Do auth
if err := br.authenticator.DoAuth(req); err != nil {
authErr := errs.UnauthorizedError(err)
w.WriteHeader(http.StatusUnauthorized)

@ -58,11 +58,11 @@ func (br *BaseRouter) ServeHTTP(w http.ResponseWriter, req *http.Request) {
return
}

//Directly pass requests to the server mux.
// Directly pass requests to the server mux.
br.router.ServeHTTP(w, req)
}

//registerRoutes adds routes to the server mux.
// registerRoutes adds routes to the server mux.
func (br *BaseRouter) registerRoutes() {
subRouter := br.router.PathPrefix(fmt.Sprintf("%s/%s", baseRoute, apiVersion)).Subrouter()
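registerRoutes above hangs all handlers off a gorilla/mux subrouter rooted at baseRoute/apiVersion, while ServeHTTP runs the authenticator before delegating to that mux. A compact sketch of the same wiring; the "/api" prefix and the "/jobs" route are placeholders, since the full route table is not part of the hunk shown:

package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func newRouter() http.Handler {
	r := mux.NewRouter()
	// Equivalent of PathPrefix(fmt.Sprintf("%s/%s", baseRoute, apiVersion)).Subrouter()
	sub := r.PathPrefix(fmt.Sprintf("%s/%s", "/api", "v1")).Subrouter()
	sub.HandleFunc("/jobs", func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusNoContent)
	}).Methods(http.MethodPost)
	return r
}

func main() {
	_ = http.ListenAndServe(":8080", newRouter())
}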

@ -14,37 +14,37 @@ import (
"github.com/goharbor/harbor/src/jobservice/logger"
)

//Server serves the http requests.
// Server serves the http requests.
type Server struct {
//The real backend http server to serve the requests
// The real backend http server to serve the requests
httpServer *http.Server

//Define the routes of http service
// Define the routes of http service
router Router

//Keep the configurations of server
// Keep the configurations of server
config ServerConfig

//The context
// The context
context *env.Context
}

//ServerConfig contains the configurations of Server.
// ServerConfig contains the configurations of Server.
type ServerConfig struct {
//Protocol server listening on: https/http
// Protocol server listening on: https/http
Protocol string

//Server listening port
// Server listening port
Port uint

//Cert file path if using https
// Cert file path if using https
Cert string

//Key file path if using https
// Key file path if using https
Key string
}

//NewServer is constructor of Server.
// NewServer is constructor of Server.
func NewServer(ctx *env.Context, router Router, cfg ServerConfig) *Server {
apiServer := &Server{
router: router,

@ -60,7 +60,7 @@ func NewServer(ctx *env.Context, router Router, cfg ServerConfig) *Server {
IdleTimeout: 60 * time.Second,
}

//Initialize TLS/SSL config if protocol is https
// Initialize TLS/SSL config if protocol is https
if cfg.Protocol == config.JobServiceProtocolHTTPS {
tlsCfg := &tls.Config{
MinVersion: tls.VersionTLS12,

@ -83,7 +83,7 @@ func NewServer(ctx *env.Context, router Router, cfg ServerConfig) *Server {
return apiServer
}

//Start the server to serve requests.
// Start the server to serve requests.
func (s *Server) Start() {
s.context.WG.Add(1)

@ -106,7 +106,7 @@ func (s *Server) Start() {
}()
}

//Stop server gracefully.
// Stop server gracefully.
func (s *Server) Stop() {
go func() {
defer func() {
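Server above wraps an http.Server with timeouts, an optional TLS 1.2 floor, and a Stop method for graceful shutdown. A stand-alone sketch of that lifecycle using only net/http; the address and certificate paths are placeholders:

package main

import (
	"context"
	"crypto/tls"
	"log"
	"net/http"
	"time"
)

func main() {
	srv := &http.Server{
		Addr:         ":8443", // placeholder address
		Handler:      http.NotFoundHandler(),
		ReadTimeout:  10 * time.Second,
		WriteTimeout: 10 * time.Second,
		IdleTimeout:  60 * time.Second,
		TLSConfig:    &tls.Config{MinVersion: tls.VersionTLS12},
	}

	go func() {
		// ListenAndServeTLS corresponds to the https branch of Start.
		if err := srv.ListenAndServeTLS("server.crt", "server.key"); err != nil && err != http.ErrServerClosed {
			log.Printf("server exited: %v", err)
		}
	}()

	// The graceful path Stop takes: give in-flight requests time to finish.
	time.Sleep(2 * time.Second)
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	_ = srv.Shutdown(ctx)
}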

@ -1,6 +1,6 @@
// Copyright 2018 The Harbor Authors. All rights reserved.

//Package config provides functions to handle the configurations of job service.
// Package config provides functions to handle the configurations of job service.
package config

import (

@ -30,81 +30,81 @@ const (
jobServiceAdminServerEndpoint = "ADMINSERVER_URL"
jobServiceAuthSecret = "JOBSERVICE_SECRET"

//JobServiceProtocolHTTPS points to the 'https' protocol
// JobServiceProtocolHTTPS points to the 'https' protocol
JobServiceProtocolHTTPS = "https"
//JobServiceProtocolHTTP points to the 'http' protocol
// JobServiceProtocolHTTP points to the 'http' protocol
JobServiceProtocolHTTP = "http"

//JobServicePoolBackendRedis represents redis backend
// JobServicePoolBackendRedis represents redis backend
JobServicePoolBackendRedis = "redis"

//secret of UI
// secret of UI
uiAuthSecret = "UI_SECRET"

//redis protocol schema
// redis protocol schema
redisSchema = "redis://"
)

//DefaultConfig is the default configuration reference
// DefaultConfig is the default configuration reference
var DefaultConfig = &Configuration{}

//Configuration loads and keeps the related configuration items of job service.
// Configuration loads and keeps the related configuration items of job service.
type Configuration struct {
//Protocol server listening on: https/http
// Protocol server listening on: https/http
Protocol string `yaml:"protocol"`

//Server listening port
// Server listening port
Port uint `yaml:"port"`

AdminServer string `yaml:"admin_server"`

//Additional config when using https
// Additional config when using https
HTTPSConfig *HTTPSConfig `yaml:"https_config,omitempty"`

//Configurations of worker pool
// Configurations of worker pool
PoolConfig *PoolConfig `yaml:"worker_pool,omitempty"`

//Logger configurations
// Logger configurations
LoggerConfig *LoggerConfig `yaml:"logger,omitempty"`
}

//HTTPSConfig keeps additional configurations when using https protocol
// HTTPSConfig keeps additional configurations when using https protocol
type HTTPSConfig struct {
Cert string `yaml:"cert"`
Key string `yaml:"key"`
}

//RedisPoolConfig keeps redis pool info.
// RedisPoolConfig keeps redis pool info.
type RedisPoolConfig struct {
RedisURL string `yaml:"redis_url"`
Namespace string `yaml:"namespace"`
}

//PoolConfig keeps worker pool configurations.
// PoolConfig keeps worker pool configurations.
type PoolConfig struct {
//Worker concurrency
// Worker concurrency
WorkerCount uint `yaml:"workers"`
Backend string `yaml:"backend"`
RedisPoolCfg *RedisPoolConfig `yaml:"redis_pool,omitempty"`
}

//LoggerConfig keeps logger configurations.
// LoggerConfig keeps logger configurations.
type LoggerConfig struct {
BasePath string `yaml:"path"`
LogLevel string `yaml:"level"`
ArchivePeriod uint `yaml:"archive_period"`
}

//Load the configuration options from the specified yaml file.
//If the yaml file is specified and existing, load configurations from yaml file first;
//If detecting env variables is specified, load configurations from env variables;
//Please pay attentions, the detected env variable will override the same configuration item loading from file.
// Load the configuration options from the specified yaml file.
// If the yaml file is specified and existing, load configurations from yaml file first;
// If detecting env variables is specified, load configurations from env variables;
// Please pay attentions, the detected env variable will override the same configuration item loading from file.
//
//yamlFilePath string: The path config yaml file
//readEnv bool : Whether detect the environment variables or not
// yamlFilePath string: The path config yaml file
// readEnv bool : Whether detect the environment variables or not
func (c *Configuration) Load(yamlFilePath string, detectEnv bool) error {
if !utils.IsEmptyStr(yamlFilePath) {
//Try to load from file first
// Try to load from file first
data, err := ioutil.ReadFile(yamlFilePath)
if err != nil {
return err

@ -115,11 +115,11 @@ func (c *Configuration) Load(yamlFilePath string, detectEnv bool) error {
}

if detectEnv {
//Load from env variables
// Load from env variables
c.loadEnvs()
}

//translate redis url if needed
// translate redis url if needed
if c.PoolConfig != nil && c.PoolConfig.RedisPoolCfg != nil {
redisAddress := c.PoolConfig.RedisPoolCfg.RedisURL
if !utils.IsEmptyStr(redisAddress) {

@ -135,11 +135,11 @@ func (c *Configuration) Load(yamlFilePath string, detectEnv bool) error {
}
}

//Validate settings
// Validate settings
return c.validate()
}

//GetLogBasePath returns the log base path config
// GetLogBasePath returns the log base path config
func GetLogBasePath() string {
if DefaultConfig.LoggerConfig != nil {
return DefaultConfig.LoggerConfig.BasePath

@ -148,7 +148,7 @@ func GetLogBasePath() string {
return ""
}

//GetLogLevel returns the log level
// GetLogLevel returns the log level
func GetLogLevel() string {
if DefaultConfig.LoggerConfig != nil {
return DefaultConfig.LoggerConfig.LogLevel

@ -157,31 +157,31 @@ func GetLogLevel() string {
return ""
}

//GetLogArchivePeriod returns the archive period
// GetLogArchivePeriod returns the archive period
func GetLogArchivePeriod() uint {
if DefaultConfig.LoggerConfig != nil {
return DefaultConfig.LoggerConfig.ArchivePeriod
}

return 1 //return default
return 1 // return default
}

//GetAuthSecret get the auth secret from the env
// GetAuthSecret get the auth secret from the env
func GetAuthSecret() string {
return utils.ReadEnv(jobServiceAuthSecret)
}

//GetUIAuthSecret get the auth secret of UI side
// GetUIAuthSecret get the auth secret of UI side
func GetUIAuthSecret() string {
return utils.ReadEnv(uiAuthSecret)
}

//GetAdminServerEndpoint return the admin server endpoint
// GetAdminServerEndpoint return the admin server endpoint
func GetAdminServerEndpoint() string {
return DefaultConfig.AdminServer
}

//Load env variables
// Load env variables
func (c *Configuration) loadEnvs() {
prot := utils.ReadEnv(jobServiceProtocol)
if !utils.IsEmptyStr(prot) {

@ -195,7 +195,7 @@ func (c *Configuration) loadEnvs() {
}
}

//Only when protocol is https
// Only when protocol is https
if c.Protocol == JobServiceProtocolHTTPS {
cert := utils.ReadEnv(jobServiceHTTPCert)
if !utils.IsEmptyStr(cert) {

@ -256,7 +256,7 @@ func (c *Configuration) loadEnvs() {
}
}

//logger
// logger
loggerPath := utils.ReadEnv(jobServiceLoggerBasePath)
if !utils.IsEmptyStr(loggerPath) {
if c.LoggerConfig == nil {

@ -281,14 +281,14 @@ func (c *Configuration) loadEnvs() {
}
}

//admin server
// admin server
if adminServer := utils.ReadEnv(jobServiceAdminServerEndpoint); !utils.IsEmptyStr(adminServer) {
c.AdminServer = adminServer
}

}

//Check if the configurations are valid settings.
// Check if the configurations are valid settings.
func (c *Configuration) validate() error {
if c.Protocol != JobServiceProtocolHTTPS &&
c.Protocol != JobServiceProtocolHTTP {

@ -323,7 +323,7 @@ func (c *Configuration) validate() error {
return fmt.Errorf("worker pool backend %s does not support", c.PoolConfig.Backend)
}

//When backend is redis
// When backend is redis
if c.PoolConfig.Backend == JobServicePoolBackendRedis {
if c.PoolConfig.RedisPoolCfg == nil {
return fmt.Errorf("redis pool must be configured when backend is set to '%s'", c.PoolConfig.Backend)

@ -366,5 +366,5 @@ func (c *Configuration) validate() error {
return fmt.Errorf("invalid admin server endpoint: %s", err)
}

return nil //valid
return nil // valid
}
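The yaml tags in the Configuration hunks above imply a config file of roughly the shape below. The sketch re-declares a trimmed struct locally and unmarshals an illustrative document with gopkg.in/yaml.v2; the field values are placeholders, not documented defaults:

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

// cfg mirrors a subset of the yaml tags shown above (protocol, port,
// admin_server, worker_pool/redis_pool); it is re-declared only so this
// sketch compiles on its own.
type cfg struct {
	Protocol    string `yaml:"protocol"`
	Port        uint   `yaml:"port"`
	AdminServer string `yaml:"admin_server"`
	Pool        struct {
		WorkerCount uint   `yaml:"workers"`
		Backend     string `yaml:"backend"`
		RedisPool   struct {
			RedisURL  string `yaml:"redis_url"`
			Namespace string `yaml:"namespace"`
		} `yaml:"redis_pool"`
	} `yaml:"worker_pool"`
}

const doc = `
protocol: http
port: 8080
admin_server: http://adminserver:8080
worker_pool:
  workers: 10
  backend: redis
  redis_pool:
    redis_url: redis://redis:6379
    namespace: harbor_job_service
`

func main() {
	var c cfg
	if err := yaml.Unmarshal([]byte(doc), &c); err != nil {
		panic(err)
	}
	fmt.Printf("%s on :%d, backend=%s\n", c.Protocol, c.Port, c.Pool.Backend)
}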

@ -21,38 +21,38 @@ const (
hookDeactivated = "error"
)

//Controller implement the core interface and provides related job handle methods.
//Controller will coordinate the lower components to complete the process as a commander role.
// Controller implement the core interface and provides related job handle methods.
// Controller will coordinate the lower components to complete the process as a commander role.
type Controller struct {
//Refer the backend pool
// Refer the backend pool
backendPool pool.Interface
}

//NewController is constructor of Controller.
// NewController is constructor of Controller.
func NewController(backendPool pool.Interface) *Controller {
return &Controller{
backendPool: backendPool,
}
}

//LaunchJob is implementation of same method in core interface.
// LaunchJob is implementation of same method in core interface.
func (c *Controller) LaunchJob(req models.JobRequest) (models.JobStats, error) {
if err := validJobReq(req); err != nil {
return models.JobStats{}, err
}

//Validate job name
// Validate job name
jobType, isKnownJob := c.backendPool.IsKnownJob(req.Job.Name)
if !isKnownJob {
return models.JobStats{}, fmt.Errorf("job with name '%s' is unknown", req.Job.Name)
}

//Validate parameters
// Validate parameters
if err := c.backendPool.ValidateJobParameters(jobType, req.Job.Parameters); err != nil {
return models.JobStats{}, err
}

//Enqueue job regarding of the kind
// Enqueue job regarding of the kind
var (
res models.JobStats
err error

@ -73,7 +73,7 @@ func (c *Controller) LaunchJob(req models.JobRequest) (models.JobStats, error) {
res, err = c.backendPool.Enqueue(req.Job.Name, req.Job.Parameters, req.Job.Metadata.IsUnique)
}

//Register status hook?
// Register status hook?
if err == nil {
if !utils.IsEmptyStr(req.Job.StatusHook) {
if err := c.backendPool.RegisterHook(res.Stats.JobID, req.Job.StatusHook); err != nil {

@ -87,7 +87,7 @@ func (c *Controller) LaunchJob(req models.JobRequest) (models.JobStats, error) {
return res, err
}

//GetJob is implementation of same method in core interface.
// GetJob is implementation of same method in core interface.
func (c *Controller) GetJob(jobID string) (models.JobStats, error) {
if utils.IsEmptyStr(jobID) {
return models.JobStats{}, errors.New("empty job ID")

@ -96,7 +96,7 @@ func (c *Controller) GetJob(jobID string) (models.JobStats, error) {
return c.backendPool.GetJobStats(jobID)
}

//StopJob is implementation of same method in core interface.
// StopJob is implementation of same method in core interface.
func (c *Controller) StopJob(jobID string) error {
if utils.IsEmptyStr(jobID) {
return errors.New("empty job ID")

@ -105,7 +105,7 @@ func (c *Controller) StopJob(jobID string) error {
return c.backendPool.StopJob(jobID)
}

//CancelJob is implementation of same method in core interface.
// CancelJob is implementation of same method in core interface.
func (c *Controller) CancelJob(jobID string) error {
if utils.IsEmptyStr(jobID) {
return errors.New("empty job ID")

@ -114,7 +114,7 @@ func (c *Controller) CancelJob(jobID string) error {
return c.backendPool.CancelJob(jobID)
}

//RetryJob is implementation of same method in core interface.
// RetryJob is implementation of same method in core interface.
func (c *Controller) RetryJob(jobID string) error {
if utils.IsEmptyStr(jobID) {
return errors.New("empty job ID")

@ -123,7 +123,7 @@ func (c *Controller) RetryJob(jobID string) error {
return c.backendPool.RetryJob(jobID)
}

//GetJobLogData is used to return the log text data for the specified job if exists
// GetJobLogData is used to return the log text data for the specified job if exists
func (c *Controller) GetJobLogData(jobID string) ([]byte, error) {
if utils.IsEmptyStr(jobID) {
return nil, errors.New("empty job ID")

@ -142,7 +142,7 @@ func (c *Controller) GetJobLogData(jobID string) ([]byte, error) {
return logData, nil
}

//CheckStatus is implementation of same method in core interface.
// CheckStatus is implementation of same method in core interface.
func (c *Controller) CheckStatus() (models.JobPoolStats, error) {
return c.backendPool.Stats()
}

@ -1,59 +1,59 @@
// Copyright 2018 The Harbor Authors. All rights reserved.

//Package core provides the main job operation interface and components.
// Package core provides the main job operation interface and components.
package core

import (
"github.com/goharbor/harbor/src/jobservice/models"
)

//Interface defines the related main methods of job operation.
// Interface defines the related main methods of job operation.
type Interface interface {
//LaunchJob is used to handle the job submission request.
// LaunchJob is used to handle the job submission request.
//
//req JobRequest : Job request contains related required information of queuing job.
// req JobRequest : Job request contains related required information of queuing job.
//
//Returns:
// Returns:
// JobStats: Job status info with ID and self link returned if job is successfully launched.
// error : Error returned if failed to launch the specified job.
LaunchJob(req models.JobRequest) (models.JobStats, error)

//GetJob is used to handle the job stats query request.
// GetJob is used to handle the job stats query request.
//
//jobID string: ID of job.
// jobID string: ID of job.
//
//Returns:
// Returns:
// JobStats: Job status info if job exists.
// error : Error returned if failed to get the specified job.
GetJob(jobID string) (models.JobStats, error)

//StopJob is used to handle the job stopping request.
// StopJob is used to handle the job stopping request.
//
//jobID string: ID of job.
// jobID string: ID of job.
//
//Return:
// Return:
// error : Error returned if failed to stop the specified job.
StopJob(jobID string) error

//RetryJob is used to handle the job retrying request.
// RetryJob is used to handle the job retrying request.
//
//jobID string : ID of job.
// jobID string : ID of job.
//
//Return:
// Return:
// error : Error returned if failed to retry the specified job.
RetryJob(jobID string) error

//Cancel the job
// Cancel the job
//
//jobID string : ID of the enqueued job
// jobID string : ID of the enqueued job
//
//Returns:
// Returns:
// error : error returned if meet any problems
CancelJob(jobID string) error

//CheckStatus is used to handle the job service healthy status checking request.
// CheckStatus is used to handle the job service healthy status checking request.
CheckStatus() (models.JobPoolStats, error)

//GetJobLogData is used to return the log text data for the specified job if exists
// GetJobLogData is used to return the log text data for the specified job if exists
GetJobLogData(jobID string) ([]byte, error)
}

src/jobservice/env/context.go (vendored)

@ -5,20 +5,20 @@ import (
"sync"
)

//Context keep some sharable materials and system controlling channels.
//The system context.Context interface is also included.
// Context keep some sharable materials and system controlling channels.
// The system context.Context interface is also included.
type Context struct {
//The system context with cancel capability.
// The system context with cancel capability.
SystemContext context.Context

//Coordination signal
// Coordination signal
WG *sync.WaitGroup

//Report errors to bootstrap component
//Once error is reported by lower components, the whole system should exit
// Report errors to bootstrap component
// Once error is reported by lower components, the whole system should exit
ErrorChan chan error

//The base job context reference
//It will be the parent conetext of job execution context
// The base job context reference
// It will be the parent conetext of job execution context
JobContext JobContext
}
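env.Context above bundles a cancellable system context, a WaitGroup for coordination, and an error channel that lower components use to ask the bootstrap to exit. A self-contained sketch of that shutdown pattern with the same three fields re-declared locally:

package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
)

// bootCtx re-declares the coordination fields of env.Context for this sketch.
type bootCtx struct {
	SystemContext context.Context
	WG            *sync.WaitGroup
	ErrorChan     chan error
}

func worker(ctx *bootCtx) {
	defer ctx.WG.Done()
	// Any fatal problem is reported to the bootstrap component.
	ctx.ErrorChan <- errors.New("lost redis connection")
}

func main() {
	ctx := &bootCtx{
		SystemContext: context.Background(),
		WG:            &sync.WaitGroup{},
		ErrorChan:     make(chan error, 1),
	}
	ctx.WG.Add(1)
	go worker(ctx)

	err := <-ctx.ErrorChan // once any component reports, the whole system exits
	ctx.WG.Wait()
	fmt.Println("exiting:", err)
}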

src/jobservice/env/job_context.go (vendored)

@ -8,54 +8,54 @@ import (
"github.com/goharbor/harbor/src/jobservice/logger"
)

//JobContext is combination of BaseContext and other job specified resources.
//JobContext will be the real execution context for one job.
// JobContext is combination of BaseContext and other job specified resources.
// JobContext will be the real execution context for one job.
type JobContext interface {
//Build the context based on the parent context
// Build the context based on the parent context
//
//dep JobData : Dependencies for building the context, just in case that the build
//function need some external info
// dep JobData : Dependencies for building the context, just in case that the build
// function need some external info
//
//Returns:
// Returns:
// new JobContext based on the parent one
// error if meet any problems
Build(dep JobData) (JobContext, error)

//Get property from the context
// Get property from the context
//
//prop string : key of the context property
// prop string : key of the context property
//
//Returns:
// Returns:
// The data of the specified context property if have
// bool to indicate if the property existing
Get(prop string) (interface{}, bool)

//SystemContext returns the system context
// SystemContext returns the system context
//
//Returns:
// Returns:
// context.Context
SystemContext() context.Context

//Checkin is bridge func for reporting detailed status
// Checkin is bridge func for reporting detailed status
//
//status string : detailed status
// status string : detailed status
//
//Returns:
// Returns:
// error if meet any problems
Checkin(status string) error

//OPCommand return the control operational command like stop/cancel if have
// OPCommand return the control operational command like stop/cancel if have
//
//Returns:
// Returns:
// op command if have
// flag to indicate if have command
OPCommand() (string, bool)

//Return the logger
// Return the logger
GetLogger() logger.Interface
}

//JobData defines job context dependencies.
// JobData defines job context dependencies.
type JobData struct {
ID string
Name string

@ -63,5 +63,5 @@ type JobData struct {
ExtraData map[string]interface{}
}

//JobContextInitializer is a func to initialize the concrete job context
// JobContextInitializer is a func to initialize the concrete job context
type JobContextInitializer func(ctx *Context) (JobContext, error)

@ -1,6 +1,6 @@
// Copyright 2018 The Harbor Authors. All rights reserved.

//Package errs define some system errors with specified types.
// Package errs define some system errors with specified types.
package errs

import (

@ -8,46 +8,46 @@ import (
)

const (
//JobStoppedErrorCode is code for jobStoppedError
// JobStoppedErrorCode is code for jobStoppedError
JobStoppedErrorCode = 10000 + iota
//JobCancelledErrorCode is code for jobCancelledError
// JobCancelledErrorCode is code for jobCancelledError
JobCancelledErrorCode
//ReadRequestBodyErrorCode is code for the error of reading http request body error
// ReadRequestBodyErrorCode is code for the error of reading http request body error
ReadRequestBodyErrorCode
//HandleJSONDataErrorCode is code for the error of handling json data error
// HandleJSONDataErrorCode is code for the error of handling json data error
HandleJSONDataErrorCode
//MissingBackendHandlerErrorCode is code for the error of missing backend controller
// MissingBackendHandlerErrorCode is code for the error of missing backend controller
MissingBackendHandlerErrorCode
//LaunchJobErrorCode is code for the error of launching job
// LaunchJobErrorCode is code for the error of launching job
LaunchJobErrorCode
//CheckStatsErrorCode is code for the error of checking stats of worker pool
// CheckStatsErrorCode is code for the error of checking stats of worker pool
CheckStatsErrorCode
//GetJobStatsErrorCode is code for the error of getting stats of enqueued job
// GetJobStatsErrorCode is code for the error of getting stats of enqueued job
GetJobStatsErrorCode
//StopJobErrorCode is code for the error of stopping job
// StopJobErrorCode is code for the error of stopping job
StopJobErrorCode
//CancelJobErrorCode is code for the error of cancelling job
// CancelJobErrorCode is code for the error of cancelling job
CancelJobErrorCode
//RetryJobErrorCode is code for the error of retrying job
// RetryJobErrorCode is code for the error of retrying job
RetryJobErrorCode
//UnknownActionNameErrorCode is code for the case of unknown action name
// UnknownActionNameErrorCode is code for the case of unknown action name
UnknownActionNameErrorCode
//GetJobLogErrorCode is code for the error of getting job log
// GetJobLogErrorCode is code for the error of getting job log
GetJobLogErrorCode
//NoObjectFoundErrorCode is code for the error of no object found
// NoObjectFoundErrorCode is code for the error of no object found
NoObjectFoundErrorCode
//UnAuthorizedErrorCode is code for the error of unauthorized accessing
// UnAuthorizedErrorCode is code for the error of unauthorized accessing
UnAuthorizedErrorCode
)

//baseError ...
// baseError ...
type baseError struct {
Code uint16 `json:"code"`
Err string `json:"message"`
Description string `json:"details,omitempty"`
}

//Error is implementation of error interface.
// Error is implementation of error interface.
func (be baseError) Error() string {
if data, err := json.Marshal(be); err == nil {
return string(data)

@ -56,7 +56,7 @@ func (be baseError) Error() string {
return "{}"
}

//New customized errors
// New customized errors
func New(code uint16, err string, description string) error {
return baseError{
Code: code,

@ -65,72 +65,72 @@ func New(code uint16, err string, description string) error {
}
}

//ReadRequestBodyError is error wrapper for the error of reading request body.
// ReadRequestBodyError is error wrapper for the error of reading request body.
func ReadRequestBodyError(err error) error {
return New(ReadRequestBodyErrorCode, "Read request body failed with error", err.Error())
}

//HandleJSONDataError is error wrapper for the error of handling json data.
// HandleJSONDataError is error wrapper for the error of handling json data.
func HandleJSONDataError(err error) error {
return New(HandleJSONDataErrorCode, "Handle json data failed with error", err.Error())
}

//MissingBackendHandlerError is error wrapper for the error of missing backend controller.
// MissingBackendHandlerError is error wrapper for the error of missing backend controller.
func MissingBackendHandlerError(err error) error {
return New(MissingBackendHandlerErrorCode, "Missing backend controller to handle the requests", err.Error())
}

//LaunchJobError is error wrapper for the error of launching job failed.
// LaunchJobError is error wrapper for the error of launching job failed.
func LaunchJobError(err error) error {
return New(LaunchJobErrorCode, "Launch job failed with error", err.Error())
}

//CheckStatsError is error wrapper for the error of checking stats failed
// CheckStatsError is error wrapper for the error of checking stats failed
func CheckStatsError(err error) error {
return New(CheckStatsErrorCode, "Check stats of server failed with error", err.Error())
}

//GetJobStatsError is error wrapper for the error of getting job stats
// GetJobStatsError is error wrapper for the error of getting job stats
func GetJobStatsError(err error) error {
return New(GetJobStatsErrorCode, "Get job stats failed with error", err.Error())
}

//StopJobError is error for the case of stopping job failed
// StopJobError is error for the case of stopping job failed
func StopJobError(err error) error {
return New(StopJobErrorCode, "Stop job failed with error", err.Error())
}

//CancelJobError is error for the case of cancelling job failed
// CancelJobError is error for the case of cancelling job failed
func CancelJobError(err error) error {
return New(CancelJobErrorCode, "Cancel job failed with error", err.Error())
}

//RetryJobError is error for the case of retrying job failed
// RetryJobError is error for the case of retrying job failed
func RetryJobError(err error) error {
return New(RetryJobErrorCode, "Retry job failed with error", err.Error())
}

//UnknownActionNameError is error for the case of getting unknown job action
// UnknownActionNameError is error for the case of getting unknown job action
func UnknownActionNameError(err error) error {
return New(UnknownActionNameErrorCode, "Unknown job action name", err.Error())
}

//GetJobLogError is error for the case of getting job log failed
// GetJobLogError is error for the case of getting job log failed
func GetJobLogError(err error) error {
return New(GetJobLogErrorCode, "Failed to get the job log", err.Error())
}

//UnauthorizedError is error for the case of unauthorized accessing
// UnauthorizedError is error for the case of unauthorized accessing
func UnauthorizedError(err error) error {
return New(UnAuthorizedErrorCode, "Unauthorized", err.Error())
}

//jobStoppedError is designed for the case of stopping job.
// jobStoppedError is designed for the case of stopping job.
type jobStoppedError struct {
baseError
}

//JobStoppedError is error wrapper for the case of stopping job.
// JobStoppedError is error wrapper for the case of stopping job.
func JobStoppedError() error {
return jobStoppedError{
baseError{

@ -140,12 +140,12 @@ func JobStoppedError() error {
}
}

//jobCancelledError is designed for the case of cancelling job.
// jobCancelledError is designed for the case of cancelling job.
type jobCancelledError struct {
baseError
}

//JobCancelledError is error wrapper for the case of cancelling job.
// JobCancelledError is error wrapper for the case of cancelling job.
func JobCancelledError() error {
return jobCancelledError{
baseError{

@ -155,12 +155,12 @@ func JobCancelledError() error {
}
}

//objectNotFound is designed for the case of no object found
// objectNotFound is designed for the case of no object found
type objectNotFoundError struct {
baseError
}

//NoObjectFoundError is error wrapper for the case of no object found
// NoObjectFoundError is error wrapper for the case of no object found
func NoObjectFoundError(object string) error {
return objectNotFoundError{
baseError{

@ -171,19 +171,19 @@ func NoObjectFoundError(object string) error {
}
}

//IsJobStoppedError return true if the error is jobStoppedError
// IsJobStoppedError return true if the error is jobStoppedError
func IsJobStoppedError(err error) bool {
_, ok := err.(jobStoppedError)
return ok
}

//IsJobCancelledError return true if the error is jobCancelledError
// IsJobCancelledError return true if the error is jobCancelledError
func IsJobCancelledError(err error) bool {
_, ok := err.(jobCancelledError)
return ok
}

//IsObjectNotFoundError return true if the error is objectNotFoundError
// IsObjectNotFoundError return true if the error is objectNotFoundError
func IsObjectNotFoundError(err error) bool {
_, ok := err.(objectNotFoundError)
return ok
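The typed errors above let callers distinguish a deliberate stop or cancel from a genuine failure via the Is* helpers. A usage sketch, assuming the package import path follows the sibling jobservice packages shown elsewhere in this diff:

package main

import (
	"fmt"

	"github.com/goharbor/harbor/src/jobservice/errs"
)

// classify shows the intended call-site pattern for the helpers above.
func classify(err error) string {
	switch {
	case err == nil:
		return "ok"
	case errs.IsJobStoppedError(err):
		return "stopped by user"
	case errs.IsJobCancelledError(err):
		return "cancelled by user"
	default:
		return "failed: " + err.Error()
	}
}

func main() {
	fmt.Println(classify(errs.JobStoppedError()))
	fmt.Println(classify(errs.LaunchJobError(fmt.Errorf("boom"))))
}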

@ -25,28 +25,28 @@ const (
maxRetryTimes = 5
)

//Context ...
// Context ...
type Context struct {
//System context
// System context
sysContext context.Context

//Logger for job
// Logger for job
logger logger.Interface

//op command func
// op command func
opCommandFunc job.CheckOPCmdFunc

//checkin func
// checkin func
checkInFunc job.CheckInFunc

//other required information
// other required information
properties map[string]interface{}

//admin server client
// admin server client
adminClient client.Client
}

//NewContext ...
// NewContext ...
func NewContext(sysCtx context.Context, adminClient client.Client) *Context {
return &Context{
sysContext: sysCtx,

@ -55,7 +55,7 @@ func NewContext(sysCtx context.Context, adminClient client.Client) *Context {
}
}

//Init ...
// Init ...
func (c *Context) Init() error {
var (
counter = 0

@ -83,8 +83,8 @@ func (c *Context) Init() error {
return dao.InitDatabase(db)
}

//Build implements the same method in env.JobContext interface
//This func will build the job execution context before running
// Build implements the same method in env.JobContext interface
// This func will build the job execution context before running
func (c *Context) Build(dep env.JobData) (env.JobContext, error) {
jContext := &Context{
sysContext: c.sysContext,

@ -92,14 +92,14 @@ func (c *Context) Build(dep env.JobData) (env.JobContext, error) {
properties: make(map[string]interface{}),
}

//Copy properties
// Copy properties
if len(c.properties) > 0 {
for k, v := range c.properties {
jContext.properties[k] = v
}
}

//Refresh admin server properties
// Refresh admin server properties
props, err := c.adminClient.GetCfgs()
if err != nil {
return nil, err

@ -108,7 +108,7 @@ func (c *Context) Build(dep env.JobData) (env.JobContext, error) {
jContext.properties[k] = v
}

//Init logger here
// Init logger here
logPath := fmt.Sprintf("%s/%s.log", config.GetLogBasePath(), dep.ID)
jContext.logger = jlogger.New(logPath, config.GetLogLevel())
if jContext.logger == nil {

@ -141,18 +141,18 @@ func (c *Context) Build(dep env.JobData) (env.JobContext, error) {
return jContext, nil
}

//Get implements the same method in env.JobContext interface
// Get implements the same method in env.JobContext interface
func (c *Context) Get(prop string) (interface{}, bool) {
v, ok := c.properties[prop]
return v, ok
}

//SystemContext implements the same method in env.JobContext interface
// SystemContext implements the same method in env.JobContext interface
func (c *Context) SystemContext() context.Context {
return c.sysContext
}

//Checkin is bridge func for reporting detailed status
// Checkin is bridge func for reporting detailed status
func (c *Context) Checkin(status string) error {
if c.checkInFunc != nil {
c.checkInFunc(status)

@ -163,7 +163,7 @@ func (c *Context) Checkin(status string) error {
return nil
}

//OPCommand return the control operational command like stop/cancel if have
// OPCommand return the control operational command like stop/cancel if have
func (c *Context) OPCommand() (string, bool) {
if c.opCommandFunc != nil {
return c.opCommandFunc()

@ -172,7 +172,7 @@ func (c *Context) OPCommand() (string, bool) {
return "", false
}

//GetLogger returns the logger
// GetLogger returns the logger
func (c *Context) GetLogger() logger.Interface {
return c.logger
}

@ -17,20 +17,20 @@ import (
"github.com/goharbor/harbor/src/jobservice/env"
)

//DemoJob is the job to demostrate the job interface.
// DemoJob is the job to demostrate the job interface.
type DemoJob struct{}

//MaxFails is implementation of same method in Interface.
// MaxFails is implementation of same method in Interface.
func (dj *DemoJob) MaxFails() uint {
return 3
}

//ShouldRetry ...
// ShouldRetry ...
func (dj *DemoJob) ShouldRetry() bool {
return true
}

//Validate is implementation of same method in Interface.
// Validate is implementation of same method in Interface.
func (dj *DemoJob) Validate(params map[string]interface{}) error {
if params == nil || len(params) == 0 {
return errors.New("parameters required for replication job")

@ -47,7 +47,7 @@ func (dj *DemoJob) Validate(params map[string]interface{}) error {
return nil
}

//Run the replication logic here.
// Run the replication logic here.
func (dj *DemoJob) Run(ctx env.JobContext, params map[string]interface{}) error {
logger := ctx.GetLogger()

@ -69,9 +69,9 @@ func (dj *DemoJob) Run(ctx env.JobContext, params map[string]interface{}) error
/*if 1 != 0 {
return errors.New("I suicide")
}*/
//runtime error
//var runtime_err error = nil
//fmt.Println(runtime_err.Error())
// runtime error
// var runtime_err error = nil
// fmt.Println(runtime_err.Error())

logger.Info("check in 30%")
ctx.Checkin("30%")

@ -83,10 +83,10 @@ func (dj *DemoJob) Run(ctx env.JobContext, params map[string]interface{}) error
ctx.Checkin("100%")
time.Sleep(1 * time.Second)

//HOLD ON FOR A WHILE
// HOLD ON FOR A WHILE
logger.Error("Holding for 20 sec")
<-time.After(15 * time.Second)
//logger.Fatal("I'm back, check if I'm stopped/cancelled")
// logger.Fatal("I'm back, check if I'm stopped/cancelled")

if cmd, ok := ctx.OPCommand(); ok {
logger.Infof("cmd=%s\n", cmd)

@ -2,9 +2,9 @@

package impl

//Define the register name constants of known jobs
// Define the register name constants of known jobs

const (
//KnownJobDemo is name of demo job
// KnownJobDemo is name of demo job
KnownJobDemo = "DEMO"
)
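DemoJob.Run above cooperates with the job service by polling ctx.OPCommand() for stop/cancel and calling ctx.Checkin() to report progress. The sketch below isolates that polling loop against a locally declared stand-in interface (an assumption; the real contract is the env.JobContext shown earlier in this diff):

package main

import (
	"fmt"
	"time"
)

// opCommander is a stand-in for the OPCommand part of env.JobContext.
type opCommander interface {
	OPCommand() (string, bool)
}

// fakeCtx pretends the user issued a stop command after three polls.
type fakeCtx struct{ polls int }

func (f *fakeCtx) OPCommand() (string, bool) {
	f.polls++
	if f.polls >= 3 {
		return "stop", true
	}
	return "", false
}

// runUntilStopped does one slice of work per iteration and honors control
// commands between slices, the same shape as DemoJob.Run.
func runUntilStopped(ctx opCommander) {
	for step := 1; ; step++ {
		if cmd, ok := ctx.OPCommand(); ok {
			fmt.Printf("received %q at step %d, exiting\n", cmd, step)
			return
		}
		time.Sleep(10 * time.Millisecond)
	}
}

func main() {
	runUntilStopped(&fakeCtx{})
}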
Some files were not shown because too many files have changed in this diff.