Apply consistent format for comments

Signed-off-by: 陈德 <chende@caicloud.io>
陈德 2018-09-05 16:16:31 +08:00
parent 4d601292d1
commit 0582db9a82
227 changed files with 2268 additions and 2268 deletions
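The change is mechanical across all 227 files: every "//" line comment gains a single space between the marker and the comment text, which is why the addition and deletion counts match exactly. A minimal before/after sketch of the pattern, in Go; the identifiers below are illustrative only and are not taken from Harbor:

package example

//oldStyle shows the previous format: no space after the comment marker.
func oldStyle() {}

// NewStyle shows the format applied by this commit: a single space after "//".
func NewStyle() {}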

View File

@@ -256,13 +256,13 @@ func parseStringToBool(str string) (interface{}, error) {
// Init system configurations. If env RESET is set or configurations
// read from storage driver is null, load all configurations from env
func Init() (err error) {
-    //init database
+    // init database
    envCfgs := map[string]interface{}{}
    if err := LoadFromEnv(envCfgs, true); err != nil {
        return err
    }
    db := GetDatabaseFromCfg(envCfgs)
-    //Initialize the schema, then register the DB.
+    // Initialize the schema, then register the DB.
    if err := dao.UpgradeSchema(db); err != nil {
        return err
    }
@@ -273,7 +273,7 @@ func Init() (err error) {
        return err
    }
-    //Use reload key to avoid reset customed setting after restart
+    // Use reload key to avoid reset customed setting after restart
    curCfgs, err := CfgStore.Read()
    if err != nil {
        return err
@@ -282,8 +282,8 @@
    if curCfgs == nil {
        curCfgs = map[string]interface{}{}
    }
-    //restart: only repeatload envs will be load
-    //reload_config: all envs will be reload except the skiped envs
+    // restart: only repeatload envs will be load
+    // reload_config: all envs will be reload except the skiped envs
    if err = LoadFromEnv(curCfgs, loadAll); err != nil {
        return err
    }
@@ -312,7 +312,7 @@ func initCfgStore() (err error) {
    if err != nil {
        return err
    }
-    //migration check: if no data in the db , then will try to load from path
+    // migration check: if no data in the db , then will try to load from path
    m, err := CfgStore.Read()
    if err != nil {
        return err
@@ -453,7 +453,7 @@ func validLdapScope(cfg map[string]interface{}, isMigrate bool) {
}
-//AddMissedKey ... If the configure key is missing in the cfg map, add default value to it
+// AddMissedKey ... If the configure key is missing in the cfg map, add default value to it
func AddMissedKey(cfg map[string]interface{}) {
    for k, v := range common.HarborStringKeysMap {

View File

@@ -199,7 +199,7 @@ func TestLoadFromEnvWithReloadConfigSkipPattern(t *testing.T) {
    if err != nil {
        t.Fatalf("failed to load From env: %v", err)
    }
-    assert.Equal(t, "ldap_url", cfgsReload[common.LDAPURL]) //env value ignored
+    assert.Equal(t, "ldap_url", cfgsReload[common.LDAPURL]) // env value ignored
    os.Clearenv()

View File

@@ -4,14 +4,14 @@ import (
    "net/http"
)
-//BaseHandler defines the handlers related with the chart server itself.
+// BaseHandler defines the handlers related with the chart server itself.
type BaseHandler struct {
-    //Proxy used to to transfer the traffic of requests
-    //It's mainly used to talk to the backend chart server
+    // Proxy used to to transfer the traffic of requests
+    // It's mainly used to talk to the backend chart server
    trafficProxy *ProxyEngine
}
-//GetHealthStatus will return the health status of the backend chart repository server
+// GetHealthStatus will return the health status of the backend chart repository server
func (bh *BaseHandler) GetHealthStatus(w http.ResponseWriter, req *http.Request) {
    bh.trafficProxy.ServeHTTP(w, req)
}

File diff suppressed because one or more lines are too long

View File

@@ -8,50 +8,50 @@ import (
    beego_cache "github.com/astaxie/beego/cache"
    hlog "github.com/goharbor/harbor/src/common/utils/log"
-    //Enable redis cache adaptor
+    // Enable redis cache adaptor
    _ "github.com/astaxie/beego/cache/redis"
)
const (
    standardExpireTime = 3600 * time.Second
    redisENVKey = "_REDIS_URL"
-    cacheDriverENVKey = "CHART_CACHE_DRIVER" //"memory" or "redis"
+    cacheDriverENVKey = "CHART_CACHE_DRIVER" // "memory" or "redis"
    cacheDriverMem = "memory"
    cacheDriverRedis = "redis"
    cacheCollectionName = "helm_chart_cache"
)
-//ChartCache is designed to cache some processed data for repeated accessing
-//to improve the performance
+// ChartCache is designed to cache some processed data for repeated accessing
+// to improve the performance
type ChartCache struct {
-    //Cache driver
+    // Cache driver
    cache beego_cache.Cache
-    //Keep the driver type
+    // Keep the driver type
    driverType string
-    //To indicate if the chart cache is enabled
+    // To indicate if the chart cache is enabled
    isEnabled bool
}
-//ChartCacheConfig keeps the configurations of ChartCache
+// ChartCacheConfig keeps the configurations of ChartCache
type ChartCacheConfig struct {
-    //Only support 'in-memory' and 'redis' now
+    // Only support 'in-memory' and 'redis' now
    DriverType string
-    //Align with config
+    // Align with config
    Config string
}
-//NewChartCache is constructor of ChartCache
-//If return nil, that means no cache is enabled for chart repository server
+// NewChartCache is constructor of ChartCache
+// If return nil, that means no cache is enabled for chart repository server
func NewChartCache(config *ChartCacheConfig) *ChartCache {
-    //Never return nil object
+    // Never return nil object
    chartCache := &ChartCache{
        isEnabled: false,
    }
-    //Double check the configurations are what we want
+    // Double check the configurations are what we want
    if config == nil {
        return chartCache
    }
@@ -66,13 +66,13 @@ func NewChartCache(config *ChartCacheConfig) *ChartCache {
        }
    }
-    //Try to create the upstream cache
+    // Try to create the upstream cache
    cache := initCacheDriver(config)
    if cache == nil {
        return chartCache
    }
-    //Cache enabled
+    // Cache enabled
    chartCache.isEnabled = true
    chartCache.driverType = config.DriverType
    chartCache.cache = cache
@@ -80,74 +80,74 @@ func NewChartCache(config *ChartCacheConfig) *ChartCache {
    return chartCache
}
-//IsEnabled to indicate if the chart cache is successfully enabled
-//The cache may be disabled if
+// IsEnabled to indicate if the chart cache is successfully enabled
+// The cache may be disabled if
// user does not set
// wrong configurations
func (chc *ChartCache) IsEnabled() bool {
    return chc.isEnabled
}
-//PutChart caches the detailed data of chart version
+// PutChart caches the detailed data of chart version
func (chc *ChartCache) PutChart(chart *ChartVersionDetails) {
-    //If cache is not enabled, do nothing
+    // If cache is not enabled, do nothing
    if !chc.IsEnabled() {
        return
    }
-    //As it's a valid json data anymore when retrieving back from redis cache,
-    //here we use separate methods to handle the data according to the driver type
+    // As it's a valid json data anymore when retrieving back from redis cache,
+    // here we use separate methods to handle the data according to the driver type
    if chart != nil {
        var err error
        switch chc.driverType {
        case cacheDriverMem:
-            //Directly put object in
+            // Directly put object in
            err = chc.cache.Put(chart.Metadata.Digest, chart, standardExpireTime)
        case cacheDriverRedis:
-            //Marshal to json data before saving
+            // Marshal to json data before saving
            var jsonData []byte
            if jsonData, err = json.Marshal(chart); err == nil {
                err = chc.cache.Put(chart.Metadata.Digest, jsonData, standardExpireTime)
            }
        default:
-            //Should not reach here, but still put guard code here
+            // Should not reach here, but still put guard code here
            err = errors.New("Meet invalid cache driver")
        }
        if err != nil {
-            //Just logged
+            // Just logged
            hlog.Errorf("Failed to cache chart object with error: %s\n", err)
            hlog.Warningf("If cache driver is using 'redis', please check the related configurations or the network connection")
        }
    }
}
-//GetChart trys to retrieve it from the cache
-//If hit, return the cached item;
-//otherwise, nil object is returned
+// GetChart trys to retrieve it from the cache
+// If hit, return the cached item;
+// otherwise, nil object is returned
func (chc *ChartCache) GetChart(chartDigest string) *ChartVersionDetails {
-    //If cache is not enabled, do nothing
+    // If cache is not enabled, do nothing
    if !chc.IsEnabled() {
        return nil
    }
    object := chc.cache.Get(chartDigest)
    if object != nil {
-        //Try to convert data
-        //First try the normal way
+        // Try to convert data
+        // First try the normal way
        if chartDetails, ok := object.(*ChartVersionDetails); ok {
            return chartDetails
        }
-        //Maybe json bytes
+        // Maybe json bytes
        if bytes, yes := object.([]byte); yes {
            chartDetails := &ChartVersionDetails{}
            err := json.Unmarshal(bytes, chartDetails)
            if err == nil {
                return chartDetails
            }
-            //Just logged the error
+            // Just logged the error
            hlog.Errorf("Failed to retrieve chart from cache with error: %s", err)
        }
    }
@@ -155,7 +155,7 @@ func (chc *ChartCache) GetChart(chartDigest string) *ChartVersionDetails {
    return nil
}
-//Initialize the cache driver based on the config
+// Initialize the cache driver based on the config
func initCacheDriver(cacheConfig *ChartCacheConfig) beego_cache.Cache {
    switch cacheConfig.DriverType {
    case cacheDriverMem:
@@ -164,7 +164,7 @@ func initCacheDriver(cacheConfig *ChartCacheConfig) beego_cache.Cache {
    case cacheDriverRedis:
        redisCache, err := beego_cache.NewCache(cacheDriverRedis, cacheConfig.Config)
        if err != nil {
-            //Just logged
+            // Just logged
            hlog.Errorf("Failed to initialize redis cache: %s", err)
            return nil
        }
@@ -175,7 +175,7 @@ func initCacheDriver(cacheConfig *ChartCacheConfig) beego_cache.Cache {
        break
    }
-    //Any other cases
+    // Any other cases
    hlog.Info("No cache is enabled for chart caching")
    return nil
}

View File

@@ -22,7 +22,7 @@ var (
    }
)
-//Test the no cache set scenario
+// Test the no cache set scenario
func TestNoCache(t *testing.T) {
    chartCache := NewChartCache(nil)
    if chartCache == nil {
@@ -34,7 +34,7 @@ func TestNoCache(t *testing.T) {
    }
}
-//Test the in memory cache
+// Test the in memory cache
func TestInMemoryCache(t *testing.T) {
    chartCache := NewChartCache(&ChartCacheConfig{
        DriverType: cacheDriverMem,
@@ -58,8 +58,8 @@ func TestInMemoryCache(t *testing.T) {
    }
}
-//Test redis cache
-//Failed to config redis cache and then use in memory instead
+// Test redis cache
+// Failed to config redis cache and then use in memory instead
func TestRedisCache(t *testing.T) {
    redisConfigV := make(map[string]string)
    redisConfigV["key"] = cacheCollectionName

View File

@@ -21,7 +21,7 @@ const (
    valuesFileName = "values.yaml"
)
-//ChartVersionDetails keeps the detailed data info of the chart version
+// ChartVersionDetails keeps the detailed data info of the chart version
type ChartVersionDetails struct {
    Metadata *helm_repo.ChartVersion `json:"metadata"`
    Dependencies []*chartutil.Dependency `json:"dependencies"`
@@ -30,19 +30,19 @@ type ChartVersionDetails struct {
    Security *SecurityReport `json:"security"`
}
-//SecurityReport keeps the info related with security
-//e.g.: digital signature, vulnerability scanning etc.
+// SecurityReport keeps the info related with security
+// e.g.: digital signature, vulnerability scanning etc.
type SecurityReport struct {
    Signature *DigitalSignature `json:"signature"`
}
-//DigitalSignature used to indicate if the chart has been signed
+// DigitalSignature used to indicate if the chart has been signed
type DigitalSignature struct {
    Signed bool `json:"signed"`
    Provenance string `json:"prov_file"`
}
-//ChartInfo keeps the information of the chart
+// ChartInfo keeps the information of the chart
type ChartInfo struct {
    Name string
    TotalVersions uint32 `json:"total_versions"`
@@ -54,27 +54,27 @@ type ChartInfo struct {
    Deprecated bool
}
-//ChartOperator is designed to process the contents of
-//the specified chart version to get more details
+// ChartOperator is designed to process the contents of
+// the specified chart version to get more details
type ChartOperator struct{}
-//GetChartDetails parse the details from the provided content bytes
+// GetChartDetails parse the details from the provided content bytes
func (cho *ChartOperator) GetChartDetails(content []byte) (*ChartVersionDetails, error) {
    if content == nil || len(content) == 0 {
        return nil, errors.New("zero content")
    }
-    //Load chart from in-memory content
+    // Load chart from in-memory content
    reader := bytes.NewReader(content)
    chartData, err := chartutil.LoadArchive(reader)
    if err != nil {
        return nil, err
    }
-    //Parse the requirements of chart
+    // Parse the requirements of chart
    requirements, err := chartutil.LoadRequirements(chartData)
    if err != nil {
-        //If no requirements.yaml, return empty dependency list
+        // If no requirements.yaml, return empty dependency list
        if _, ok := err.(chartutil.ErrNoRequirementsFile); ok {
            requirements = &chartutil.Requirements{
                Dependencies: make([]*chartutil.Dependency, 0),
@@ -86,16 +86,16 @@ func (cho *ChartOperator) GetChartDetails(content []byte) (*ChartVersionDetails,
    var values map[string]interface{}
    files := make(map[string]string)
-    //Parse values
+    // Parse values
    if chartData.Values != nil {
        values = parseRawValues([]byte(chartData.Values.GetRaw()))
        if len(values) > 0 {
-            //Append values.yaml file
+            // Append values.yaml file
            files[valuesFileName] = chartData.Values.Raw
        }
    }
-    //Append other files like 'README.md'
+    // Append other files like 'README.md'
    for _, v := range chartData.GetFiles() {
        if v.TypeUrl == readmeFileName {
            files[readmeFileName] = string(v.GetValue())
@@ -112,7 +112,7 @@ func (cho *ChartOperator) GetChartDetails(content []byte) (*ChartVersionDetails,
    return theChart, nil
}
-//GetChartList returns a reorganized chart list
+// GetChartList returns a reorganized chart list
func (cho *ChartOperator) GetChartList(content []byte) ([]*ChartInfo, error) {
    if content == nil || len(content) == 0 {
        return nil, errors.New("zero content")
@@ -140,8 +140,8 @@ func (cho *ChartOperator) GetChartList(content []byte) ([]*ChartInfo, error) {
        }
    }
-    //Sort the chart list by the updated time which is the create time
-    //of the latest version of the chart.
+    // Sort the chart list by the updated time which is the create time
+    // of the latest version of the chart.
    sort.Slice(chartList, func(i, j int) bool {
        if chartList[i].Updated.Equal(chartList[j].Updated) {
            return strings.Compare(chartList[i].Name, chartList[j].Name) < 0
@@ -153,7 +153,7 @@ func (cho *ChartOperator) GetChartList(content []byte) ([]*ChartInfo, error) {
    return chartList, nil
}
-//GetChartVersions returns the chart versions
+// GetChartVersions returns the chart versions
func (cho *ChartOperator) GetChartVersions(content []byte) (helm_repo.ChartVersions, error) {
    if content == nil || len(content) == 0 {
        return nil, errors.New("zero content")
@@ -167,7 +167,7 @@ func (cho *ChartOperator) GetChartVersions(content []byte) (helm_repo.ChartVersi
    return chartVersions, nil
}
-//Get the latest and oldest chart versions
+// Get the latest and oldest chart versions
func getTheTwoCharts(chartVersions helm_repo.ChartVersions) (latestChart *helm_repo.ChartVersion, oldestChart *helm_repo.ChartVersion) {
    if len(chartVersions) == 1 {
        return chartVersions[0], chartVersions[0]
@@ -176,18 +176,18 @@ func getTheTwoCharts(chartVersions helm_repo.ChartVersions) (latestChart *helm_r
    for _, chartVersion := range chartVersions {
        currentV, err := semver.NewVersion(chartVersion.Version)
        if err != nil {
-            //ignore it, just logged
+            // ignore it, just logged
            hlog.Warningf("Malformed semversion %s for the chart %s", chartVersion.Version, chartVersion.Name)
            continue
        }
-        //Find latest chart
+        // Find latest chart
        if latestChart == nil {
            latestChart = chartVersion
        } else {
            lVersion, err := semver.NewVersion(latestChart.Version)
            if err != nil {
-                //ignore it, just logged
+                // ignore it, just logged
                hlog.Warningf("Malformed semversion %s for the chart %s", latestChart.Version, chartVersion.Name)
                continue
            }
@@ -208,7 +208,7 @@ func getTheTwoCharts(chartVersions helm_repo.ChartVersions) (latestChart *helm_r
    return latestChart, oldestChart
}
-//Parse the raw values to value map
+// Parse the raw values to value map
func parseRawValues(rawValue []byte) map[string]interface{} {
    valueMap := make(map[string]interface{})
@@ -226,7 +226,7 @@ func parseRawValues(rawValue []byte) map[string]interface{} {
    return valueMap
}
-//Recursively read value
+// Recursively read value
func readValue(values map[string]interface{}, keyPrefix string, valueMap map[string]interface{}) {
    for key, value := range values {
        longKey := key

View File

@@ -17,18 +17,18 @@ const (
    idleConnectionTimeout = 30 * time.Second
)
-//ChartClient is a http client to get the content from the external http server
+// ChartClient is a http client to get the content from the external http server
type ChartClient struct {
-    //HTTP client
+    // HTTP client
    httpClient *http.Client
-    //Auth info
+    // Auth info
    credentail *Credential
}
-//NewChartClient is constructor of ChartClient
-//credentail can be nil
-func NewChartClient(credentail *Credential) *ChartClient { //Create http client with customized timeouts
+// NewChartClient is constructor of ChartClient
+// credentail can be nil
+func NewChartClient(credentail *Credential) *ChartClient { // Create http client with customized timeouts
    client := &http.Client{
        Timeout: clientTimeout,
        Transport: &http.Transport{
@@ -43,7 +43,7 @@ func NewChartClient(credentail *Credential) *ChartClient { //Create http client
    }
}
-//GetContent get the bytes from the specified url
+// GetContent get the bytes from the specified url
func (cc *ChartClient) GetContent(addr string) ([]byte, error) {
    response, err := cc.sendRequest(addr, http.MethodGet, nil, []int{http.StatusOK})
    if err != nil {
@@ -59,13 +59,13 @@ func (cc *ChartClient) GetContent(addr string) ([]byte, error) {
    return content, nil
}
-//DeleteContent sends deleting request to the addr to delete content
+// DeleteContent sends deleting request to the addr to delete content
func (cc *ChartClient) DeleteContent(addr string) error {
    _, err := cc.sendRequest(addr, http.MethodDelete, nil, []int{http.StatusOK})
    return err
}
-//sendRequest sends requests to the addr with the specified spec
+// sendRequest sends requests to the addr with the specified spec
func (cc *ChartClient) sendRequest(addr string, method string, body io.Reader, expectedCodes []int) (*http.Response, error) {
    if len(strings.TrimSpace(addr)) == 0 {
        return nil, errors.New("empty url is not allowed")
@@ -81,7 +81,7 @@ func (cc *ChartClient) sendRequest(addr string, method string, body io.Reader, e
        return nil, err
    }
-    //Set basic auth
+    // Set basic auth
    if cc.credentail != nil {
        request.SetBasicAuth(cc.credentail.Username, cc.credentail.Password)
    }

View File

@@ -15,58 +15,58 @@ const (
    passwordKey = "UI_SECRET"
)
-//Credential keeps the username and password for the basic auth
+// Credential keeps the username and password for the basic auth
type Credential struct {
    Username string
    Password string
}
-//Controller is used to handle flows of related requests based on the corresponding handlers
-//A reverse proxy will be created and managed to proxy the related traffics between API and
-//backend chart server
+// Controller is used to handle flows of related requests based on the corresponding handlers
+// A reverse proxy will be created and managed to proxy the related traffics between API and
+// backend chart server
type Controller struct {
-    //The access endpoint of the backend chart repository server
+    // The access endpoint of the backend chart repository server
    backendServerAddr *url.URL
-    //To cover the server info and status requests
+    // To cover the server info and status requests
    baseHandler *BaseHandler
-    //To cover the chart repository requests
+    // To cover the chart repository requests
    repositoryHandler *RepositoryHandler
-    //To cover all the manipulation requests
+    // To cover all the manipulation requests
    manipulationHandler *ManipulationHandler
-    //To cover the other utility requests
+    // To cover the other utility requests
    utilityHandler *UtilityHandler
}
-//NewController is constructor of the chartserver.Controller
+// NewController is constructor of the chartserver.Controller
func NewController(backendServer *url.URL) (*Controller, error) {
    if backendServer == nil {
        return nil, errors.New("failed to create chartserver.Controller: backend sever address is required")
    }
-    //Try to create credential
+    // Try to create credential
    cred := &Credential{
        Username: userName,
        Password: os.Getenv(passwordKey),
    }
-    //Use customized reverse proxy
+    // Use customized reverse proxy
    proxy := NewProxyEngine(backendServer, cred)
-    //Create http client with customized timeouts
+    // Create http client with customized timeouts
    client := NewChartClient(cred)
-    //Initialize chart operator for use
+    // Initialize chart operator for use
    operator := &ChartOperator{}
-    //Creat cache
+    // Creat cache
    cacheCfg, err := getCacheConfig()
    if err != nil {
-        //just log the error
-        //will not break the whole flow if failed to create cache
+        // just log the error
+        // will not break the whole flow if failed to create cache
        hlog.Errorf("failed to get cache configuration with error: %s", err)
    }
    cache := NewChartCache(cacheCfg)
@@ -97,33 +97,33 @@ func NewController(backendServer *url.URL) (*Controller, error) {
    }, nil
}
-//GetBaseHandler returns the reference of BaseHandler
+// GetBaseHandler returns the reference of BaseHandler
func (c *Controller) GetBaseHandler() *BaseHandler {
    return c.baseHandler
}
-//GetRepositoryHandler returns the reference of RepositoryHandler
+// GetRepositoryHandler returns the reference of RepositoryHandler
func (c *Controller) GetRepositoryHandler() *RepositoryHandler {
    return c.repositoryHandler
}
-//GetManipulationHandler returns the reference of ManipulationHandler
+// GetManipulationHandler returns the reference of ManipulationHandler
func (c *Controller) GetManipulationHandler() *ManipulationHandler {
    return c.manipulationHandler
}
-//GetUtilityHandler returns the reference of UtilityHandler
+// GetUtilityHandler returns the reference of UtilityHandler
func (c *Controller) GetUtilityHandler() *UtilityHandler {
    return c.utilityHandler
}
-//What's the cache driver if it is set
+// What's the cache driver if it is set
func parseCacheDriver() (string, bool) {
    driver, ok := os.LookupEnv(cacheDriverENVKey)
    return strings.ToLower(driver), ok
}
-//Get and parse the configuration for the chart cache
+// Get and parse the configuration for the chart cache
func getCacheConfig() (*ChartCacheConfig, error) {
    driver, isSet := parseCacheDriver()
    if !isSet {

View File

@@ -12,14 +12,14 @@ import (
    helm_repo "k8s.io/helm/pkg/repo"
)
-//Prepare, start the mock servers
+// Prepare, start the mock servers
func TestStartServers(t *testing.T) {
    if err := startMockServers(); err != nil {
        t.Fatal(err)
    }
}
-//Test /health
+// Test /health
func TestGetHealthOfBaseHandler(t *testing.T) {
    content, err := httpClient.GetContent(fmt.Sprintf("%s/health", getTheAddrOfFrontServer()))
    if err != nil {
@@ -36,7 +36,7 @@ func TestGetHealthOfBaseHandler(t *testing.T) {
    }
}
-//Get /repo1/index.yaml
+// Get /repo1/index.yaml
func TestGetIndexYamlByRepo(t *testing.T) {
    indexFile, err := getIndexYaml("/repo1/index.yaml")
    if err != nil {
@@ -48,7 +48,7 @@ func TestGetIndexYamlByRepo(t *testing.T) {
    }
}
-//Test get /index.yaml
+// Test get /index.yaml
func TestGetUnifiedYamlFile(t *testing.T) {
    indexFile, err := getIndexYaml("/index.yaml")
    if err != nil {
@@ -70,8 +70,8 @@ func TestGetUnifiedYamlFile(t *testing.T) {
    }
}
-//Test download /:repo/charts/chart.tar
-//Use this case to test the proxy function
+// Test download /:repo/charts/chart.tar
+// Use this case to test the proxy function
func TestDownloadChart(t *testing.T) {
    content, err := httpClient.GetContent(fmt.Sprintf("%s/repo1/charts/harbor-0.2.0.tgz", getTheAddrOfFrontServer()))
    if err != nil {
@@ -86,7 +86,7 @@ func TestDownloadChart(t *testing.T) {
    }
}
-//Test get /api/:repo/charts
+// Test get /api/:repo/charts
func TestRetrieveChartList(t *testing.T) {
    content, err := httpClient.GetContent(fmt.Sprintf("%s/api/repo1/charts", getTheAddrOfFrontServer()))
    if err != nil {
@@ -116,7 +116,7 @@ func TestRetrieveChartList(t *testing.T) {
    }
}
-//Test get /api/:repo/charts/:chart_name/:version
+// Test get /api/:repo/charts/:chart_name/:version
func TestGetChartVersion(t *testing.T) {
    content, err := httpClient.GetContent(fmt.Sprintf("%s/api/repo1/charts/harbor/0.2.0", getTheAddrOfFrontServer()))
    if err != nil {
@@ -145,7 +145,7 @@ func TestGetChartVersion(t *testing.T) {
    }
}
-//Test get /api/:repo/charts/:chart_name/:version with none-existing version
+// Test get /api/:repo/charts/:chart_name/:version with none-existing version
func TestGetChartVersionWithError(t *testing.T) {
    _, err := httpClient.GetContent(fmt.Sprintf("%s/api/repo1/charts/harbor/1.0.0", getTheAddrOfFrontServer()))
    if err == nil {
@@ -153,8 +153,8 @@ func TestGetChartVersionWithError(t *testing.T) {
    }
}
-//Get /api/repo1/charts/harbor
-//401 will be rewritten to 500 with specified error
+// Get /api/repo1/charts/harbor
+// 401 will be rewritten to 500 with specified error
func TestResponseRewrite(t *testing.T) {
    response, err := http.Get(fmt.Sprintf("%s/api/repo1/charts/harbor", getTheAddrOfFrontServer()))
    if err != nil {
@@ -185,12 +185,12 @@ func TestResponseRewrite(t *testing.T) {
    }
}
-//Clear environments
+// Clear environments
func TestStopServers(t *testing.T) {
    stopMockServers()
}
-//Utility method for getting index yaml file
+// Utility method for getting index yaml file
func getIndexYaml(path string) (*helm_repo.IndexFile, error) {
    content, err := httpClient.GetContent(fmt.Sprintf("%s%s", getTheAddrOfFrontServer(), path))
    if err != nil {

View File

@@ -15,34 +15,34 @@ import (
)
const (
-    //NamespaceContextKey is context key for the namespace
+    // NamespaceContextKey is context key for the namespace
    NamespaceContextKey ContextKey = ":repo"
)
-//ContextKey is defined for add value in the context of http request
+// ContextKey is defined for add value in the context of http request
type ContextKey string
-//ManipulationHandler includes all the handler methods for the purpose of manipulating the
-//chart repository
+// ManipulationHandler includes all the handler methods for the purpose of manipulating the
+// chart repository
type ManipulationHandler struct {
-    //Proxy used to to transfer the traffic of requests
-    //It's mainly used to talk to the backend chart server
+    // Proxy used to to transfer the traffic of requests
+    // It's mainly used to talk to the backend chart server
    trafficProxy *ProxyEngine
-    //Parse and process the chart version to provide required info data
+    // Parse and process the chart version to provide required info data
    chartOperator *ChartOperator
-    //HTTP client used to call the realted APIs of the backend chart repositories
+    // HTTP client used to call the realted APIs of the backend chart repositories
    apiClient *ChartClient
-    //Point to the url of the backend server
+    // Point to the url of the backend server
    backendServerAddress *url.URL
-    //Cache the chart data
+    // Cache the chart data
    chartCache *ChartCache
}
-//ListCharts lists all the charts under the specified namespace
+// ListCharts lists all the charts under the specified namespace
func (mh *ManipulationHandler) ListCharts(w http.ResponseWriter, req *http.Request) {
    url := strings.TrimPrefix(req.URL.String(), "/")
    url = fmt.Sprintf("%s/%s", mh.backendServerAddress.String(), url)
@@ -68,14 +68,14 @@ func (mh *ManipulationHandler) ListCharts(w http.ResponseWriter, req *http.Reque
    writeJSONData(w, jsonData)
}
-//GetChart returns all the chart versions under the specified chart
+// GetChart returns all the chart versions under the specified chart
func (mh *ManipulationHandler) GetChart(w http.ResponseWriter, req *http.Request) {
    mh.trafficProxy.ServeHTTP(w, req)
}
-//GetChartVersion get the specified version for one chart
-//This handler should return the details of the chart version,
-//maybe including metadata,dependencies and values etc.
+// GetChartVersion get the specified version for one chart
+// This handler should return the details of the chart version,
+// maybe including metadata,dependencies and values etc.
func (mh *ManipulationHandler) GetChartVersion(w http.ResponseWriter, req *http.Request) {
    chartV, err := mh.getChartVersion(req.URL.String())
    if err != nil {
@@ -83,8 +83,8 @@ func (mh *ManipulationHandler) GetChartVersion(w http.ResponseWriter, req *http.
        return
    }
-    //Get and check namespace
-    //even we get the data from cache
+    // Get and check namespace
+    // even we get the data from cache
    var namespace string
    repoValue := req.Context().Value(NamespaceContextKey)
@@ -99,17 +99,17 @@ func (mh *ManipulationHandler) GetChartVersion(w http.ResponseWriter, req *http.
        return
    }
-    //Query cache
+    // Query cache
    chartDetails := mh.chartCache.GetChart(chartV.Digest)
    if chartDetails == nil {
-        //NOT hit!!
+        // NOT hit!!
        content, err := mh.getChartVersionContent(namespace, chartV.URLs[0])
        if err != nil {
            WriteInternalError(w, err)
            return
        }
-        //Process bytes and get more details of chart version
+        // Process bytes and get more details of chart version
        chartDetails, err = mh.chartOperator.GetChartDetails(content)
        if err != nil {
            WriteInternalError(w, err)
@@ -117,35 +117,35 @@ func (mh *ManipulationHandler) GetChartVersion(w http.ResponseWriter, req *http.
        }
        chartDetails.Metadata = chartV
-        //Put it into the cache for next access
+        // Put it into the cache for next access
        mh.chartCache.PutChart(chartDetails)
    } else {
-        //Just logged
+        // Just logged
        hlog.Debugf("Get detailed data from cache for chart: %s:%s (%s)",
            chartDetails.Metadata.Name,
            chartDetails.Metadata.Version,
            chartDetails.Metadata.Digest)
    }
-    //The change of prov file will not cause any influence to the digest of chart,
-    //and then the digital signature status should be not cached
+    // The change of prov file will not cause any influence to the digest of chart,
+    // and then the digital signature status should be not cached
    //
-    //Generate the security report
-    //prov file share same endpoint with the chart version
-    //Just add .prov suffix to the chart version to form the path of prov file
-    //Anyway, there will be a report about the digital signature status
+    // Generate the security report
+    // prov file share same endpoint with the chart version
+    // Just add .prov suffix to the chart version to form the path of prov file
+    // Anyway, there will be a report about the digital signature status
    chartDetails.Security = &SecurityReport{
        Signature: &DigitalSignature{
            Signed: false,
        },
    }
-    //Try to get the prov file to confirm if it is exitsing
+    // Try to get the prov file to confirm if it is exitsing
    provFilePath := fmt.Sprintf("%s.prov", chartV.URLs[0])
    provBytes, err := mh.getChartVersionContent(namespace, provFilePath)
    if err == nil && len(provBytes) > 0 {
        chartDetails.Security.Signature.Signed = true
        chartDetails.Security.Signature.Provenance = provFilePath
    } else {
-        //Just log it
+        // Just log it
        hlog.Errorf("Failed to get prov file for chart %s with error: %s, got %d bytes", chartV.Name, err.Error(), len(provBytes))
    }
@@ -158,22 +158,22 @@ func (mh *ManipulationHandler) GetChartVersion(w http.ResponseWriter, req *http.
    writeJSONData(w, bytes)
}
-//UploadChartVersion will save the new version of the chart to the backend storage
+// UploadChartVersion will save the new version of the chart to the backend storage
func (mh *ManipulationHandler) UploadChartVersion(w http.ResponseWriter, req *http.Request) {
    mh.trafficProxy.ServeHTTP(w, req)
}
-//UploadProvenanceFile will save the provenance file of the chart to the backend storage
+// UploadProvenanceFile will save the provenance file of the chart to the backend storage
func (mh *ManipulationHandler) UploadProvenanceFile(w http.ResponseWriter, req *http.Request) {
    mh.trafficProxy.ServeHTTP(w, req)
}
-//DeleteChartVersion will delete the specified version of the chart
+// DeleteChartVersion will delete the specified version of the chart
func (mh *ManipulationHandler) DeleteChartVersion(w http.ResponseWriter, req *http.Request) {
    mh.trafficProxy.ServeHTTP(w, req)
}
-//Get the basic metadata of chart version
+// Get the basic metadata of chart version
func (mh *ManipulationHandler) getChartVersion(subPath string) (*helm_repo.ChartVersion, error) {
    url := fmt.Sprintf("%s/%s", mh.backendServerAddress.String(), strings.TrimPrefix(subPath, "/"))
@@ -190,7 +190,7 @@ func (mh *ManipulationHandler) getChartVersion(subPath string) (*helm_repo.Chart
    return chartVersion, nil
}
-//Get the content bytes of the chart version
+// Get the content bytes of the chart version
func (mh *ManipulationHandler) getChartVersionContent(namespace string, subPath string) ([]byte, error) {
    url := path.Join(namespace, subPath)
    url = fmt.Sprintf("%s/%s", mh.backendServerAddress.String(), url)

View File

@ -22,62 +22,62 @@ const (
maxWorkers = 10 maxWorkers = 10
) )
//RepositoryHandler defines all the handlers to handle the requests related with chart repository // RepositoryHandler defines all the handlers to handle the requests related with chart repository
//e.g: index.yaml and downloading chart objects // e.g: index.yaml and downloading chart objects
type RepositoryHandler struct { type RepositoryHandler struct {
//Proxy used to to transfer the traffic of requests // Proxy used to to transfer the traffic of requests
//It's mainly used to talk to the backend chart server // It's mainly used to talk to the backend chart server
trafficProxy *ProxyEngine trafficProxy *ProxyEngine
//HTTP client used to call the realted APIs of the backend chart repositories // HTTP client used to call the realted APIs of the backend chart repositories
apiClient *ChartClient apiClient *ChartClient
//Point to the url of the backend server // Point to the url of the backend server
backendServerAddress *url.URL backendServerAddress *url.URL
} }
//Pass work to the workers // Pass work to the workers
//'index' is the location of processing namespace/project in the list // 'index' is the location of processing namespace/project in the list
type workload struct { type workload struct {
index uint32 index uint32
} }
//Result returned by worker // Result returned by worker
type processedResult struct { type processedResult struct {
namespace string namespace string
indexFileOfRepo *helm_repo.IndexFile indexFileOfRepo *helm_repo.IndexFile
} }
//GetIndexFileWithNS will read the index.yaml data under the specified namespace // GetIndexFileWithNS will read the index.yaml data under the specified namespace
func (rh *RepositoryHandler) GetIndexFileWithNS(w http.ResponseWriter, req *http.Request) { func (rh *RepositoryHandler) GetIndexFileWithNS(w http.ResponseWriter, req *http.Request) {
rh.trafficProxy.ServeHTTP(w, req) rh.trafficProxy.ServeHTTP(w, req)
} }
//GetIndexFile will read the index.yaml under all namespaces and merge them as a single one // GetIndexFile will read the index.yaml under all namespaces and merge them as a single one
//Please be aware that, to support this function, the backend chart repository server should // Please be aware that, to support this function, the backend chart repository server should
//enable multi-tenancies // enable multi-tenancies
func (rh *RepositoryHandler) GetIndexFile(w http.ResponseWriter, req *http.Request) { func (rh *RepositoryHandler) GetIndexFile(w http.ResponseWriter, req *http.Request) {
//Get project manager references // Get project manager references
projectMgr, err := filter.GetProjectManager(req) projectMgr, err := filter.GetProjectManager(req)
if err != nil { if err != nil {
WriteInternalError(w, err) WriteInternalError(w, err)
return return
} }
//Get all the projects // Get all the projects
results, err := projectMgr.List(nil) results, err := projectMgr.List(nil)
if err != nil { if err != nil {
WriteInternalError(w, err) WriteInternalError(w, err)
return return
} }
//If no projects existing, return empty index.yaml content immediately // If no projects existing, return empty index.yaml content immediately
if results.Total == 0 { if results.Total == 0 {
w.Write(emptyIndexFile()) w.Write(emptyIndexFile())
return return
} }
//The final merged index file // The final merged index file
mergedIndexFile := &helm_repo.IndexFile{ mergedIndexFile := &helm_repo.IndexFile{
APIVersion: "v1", APIVersion: "v1",
Entries: make(map[string]helm_repo.ChartVersions), Entries: make(map[string]helm_repo.ChartVersions),
@ -85,20 +85,20 @@ func (rh *RepositoryHandler) GetIndexFile(w http.ResponseWriter, req *http.Reque
PublicKeys: []string{}, PublicKeys: []string{},
} }
//Retrieve index.yaml for repositories // Retrieve index.yaml for repositories
workerPool := make(chan *workload, maxWorkers) workerPool := make(chan *workload, maxWorkers)
//Sync the output results from the retriever // Sync the output results from the retriever
resultChan := make(chan *processedResult, 1) resultChan := make(chan *processedResult, 1)
//Receive error // Receive error
errorChan := make(chan error, 1) errorChan := make(chan error, 1)
//Signal chan for merging work // Signal chan for merging work
mergeDone := make(chan struct{}, 1) mergeDone := make(chan struct{}, 1)
//Total projects/namespaces // Total projects/namespaces
total := uint32(results.Total) total := uint32(results.Total)
//Track all the background threads // Track all the background threads
waitGroup := new(sync.WaitGroup) waitGroup := new(sync.WaitGroup)
//Initialize // Initialize
initialItemCount := maxWorkers initialItemCount := maxWorkers
if total < maxWorkers { if total < maxWorkers {
initialItemCount = int(total) initialItemCount = int(total)
@ -107,11 +107,11 @@ func (rh *RepositoryHandler) GetIndexFile(w http.ResponseWriter, req *http.Reque
workerPool <- &workload{uint32(i)} workerPool <- &workload{uint32(i)}
} }
//Atomtic index // Atomtic index
var indexRef uint32 var indexRef uint32
atomic.AddUint32(&indexRef, uint32(initialItemCount-1)) atomic.AddUint32(&indexRef, uint32(initialItemCount-1))
//Start the index files merging thread // Start the index files merging thread
go func() { go func() {
defer func() { defer func() {
mergeDone <- struct{}{} mergeDone <- struct{}{}
@ -122,8 +122,8 @@ func (rh *RepositoryHandler) GetIndexFile(w http.ResponseWriter, req *http.Reque
} }
}() }()
//Retrieve the index files for the repositories // Retrieve the index files for the repositories
//and blocking here // and blocking here
LOOP: LOOP:
for { for {
select { select {
@ -131,16 +131,16 @@ LOOP:
if work.index >= total { if work.index >= total {
break LOOP break LOOP
} }
//Process // Process
//New one // New one
waitGroup.Add(1) waitGroup.Add(1)
namespace := results.Projects[work.index].Name namespace := results.Projects[work.index].Name
go func(ns string) { go func(ns string) {
//Return the worker back to the pool // Return the worker back to the pool
defer func() { defer func() {
waitGroup.Done() //done waitGroup.Done() // done
//Put one. The invalid index will be treated as a signal to quit loop // Put one. The invalid index will be treated as a signal to quit loop
nextOne := atomic.AddUint32(&indexRef, 1) nextOne := atomic.AddUint32(&indexRef, 1)
workerPool <- &workload{nextOne} workerPool <- &workload{nextOne}
}() }()
@ -151,39 +151,39 @@ LOOP:
return return
} }
//Output // Output
resultChan <- &processedResult{ resultChan <- &processedResult{
namespace: ns, namespace: ns,
indexFileOfRepo: indexFile, indexFileOfRepo: indexFile,
} }
}(namespace) }(namespace)
case err = <-errorChan: case err = <-errorChan:
//Quit earlier // Quit earlier
break LOOP break LOOP
case <-req.Context().Done(): case <-req.Context().Done():
//Quit earlier // Quit earlier
err = errors.New("request of getting index yaml file is aborted") err = errors.New("request of getting index yaml file is aborted")
break LOOP break LOOP
} }
} }
//Hold util all the retrieving work are done // Hold util all the retrieving work are done
waitGroup.Wait() waitGroup.Wait()
//close consumer channel // close consumer channel
close(resultChan) close(resultChan)
//Wait until merging thread quit // Wait until merging thread quit
<-mergeDone <-mergeDone
//All the threads are done // All the threads are done
//Met an error // Met an error
if err != nil { if err != nil {
WriteInternalError(w, err) WriteInternalError(w, err)
return return
} }
//Remove duplicated keys in public key list // Remove duplicated keys in public key list
hash := make(map[string]string) hash := make(map[string]string)
for _, key := range mergedIndexFile.PublicKeys { for _, key := range mergedIndexFile.PublicKeys {
hash[key] = key hash[key] = key
@ -202,15 +202,15 @@ LOOP:
w.Write(bytes) w.Write(bytes)
} }
//DownloadChartObject will download the stored chart object to the client // DownloadChartObject will download the stored chart object to the client
//e.g: helm install // e.g: helm install
func (rh *RepositoryHandler) DownloadChartObject(w http.ResponseWriter, req *http.Request) { func (rh *RepositoryHandler) DownloadChartObject(w http.ResponseWriter, req *http.Request) {
rh.trafficProxy.ServeHTTP(w, req) rh.trafficProxy.ServeHTTP(w, req)
} }
//Get the index yaml file under the specified namespace from the backend server // Get the index yaml file under the specified namespace from the backend server
func (rh *RepositoryHandler) getIndexYamlWithNS(namespace string) (*helm_repo.IndexFile, error) { func (rh *RepositoryHandler) getIndexYamlWithNS(namespace string) (*helm_repo.IndexFile, error) {
//Join url path // Join url path
url := path.Join(namespace, "index.yaml") url := path.Join(namespace, "index.yaml")
url = fmt.Sprintf("%s/%s", rh.backendServerAddress.String(), url) url = fmt.Sprintf("%s/%s", rh.backendServerAddress.String(), url)
hlog.Debugf("Getting index.yaml from '%s'", url) hlog.Debugf("Getting index.yaml from '%s'", url)
@ -220,7 +220,7 @@ func (rh *RepositoryHandler) getIndexYamlWithNS(namespace string) (*helm_repo.In
return nil, err return nil, err
} }
//Traverse to index file object for merging // Traverse to index file object for merging
indexFile := helm_repo.NewIndexFile() indexFile := helm_repo.NewIndexFile()
if err := yaml.Unmarshal(content, indexFile); err != nil { if err := yaml.Unmarshal(content, indexFile); err != nil {
return nil, err return nil, err
@ -229,41 +229,41 @@ func (rh *RepositoryHandler) getIndexYamlWithNS(namespace string) (*helm_repo.In
return indexFile, nil return indexFile, nil
} }
//Merge the content of mergingIndexFile to the baseIndex // Merge the content of mergingIndexFile to the baseIndex
//The chart url should be without --chart-url prefix // The chart url should be without --chart-url prefix
func (rh *RepositoryHandler) mergeIndexFile(namespace string, func (rh *RepositoryHandler) mergeIndexFile(namespace string,
baseIndex *helm_repo.IndexFile, baseIndex *helm_repo.IndexFile,
mergingIndexFile *helm_repo.IndexFile) { mergingIndexFile *helm_repo.IndexFile) {
//Append entries // Append entries
for chartName, chartVersions := range mergingIndexFile.Entries { for chartName, chartVersions := range mergingIndexFile.Entries {
nameWithNS := fmt.Sprintf("%s/%s", namespace, chartName) nameWithNS := fmt.Sprintf("%s/%s", namespace, chartName)
for _, version := range chartVersions { for _, version := range chartVersions {
version.Name = nameWithNS version.Name = nameWithNS
//Currently there is only one url // Currently there is only one url
for index, url := range version.URLs { for index, url := range version.URLs {
version.URLs[index] = path.Join(namespace, url) version.URLs[index] = path.Join(namespace, url)
} }
} }
//Appended // Appended
baseIndex.Entries[nameWithNS] = chartVersions baseIndex.Entries[nameWithNS] = chartVersions
} }
//Update generated time // Update generated time
if mergingIndexFile.Generated.After(baseIndex.Generated) { if mergingIndexFile.Generated.After(baseIndex.Generated) {
baseIndex.Generated = mergingIndexFile.Generated baseIndex.Generated = mergingIndexFile.Generated
} }
//Merge public keys // Merge public keys
baseIndex.PublicKeys = append(baseIndex.PublicKeys, mergingIndexFile.PublicKeys...) baseIndex.PublicKeys = append(baseIndex.PublicKeys, mergingIndexFile.PublicKeys...)
} }
//Generate empty index file // Generate empty index file
func emptyIndexFile() []byte { func emptyIndexFile() []byte {
emptyIndexFile := &helm_repo.IndexFile{} emptyIndexFile := &helm_repo.IndexFile{}
emptyIndexFile.Generated = time.Now() emptyIndexFile.Generated = time.Now()
//Ignore the error // Ignore the error
rawData, _ := json.Marshal(emptyIndexFile) rawData, _ := json.Marshal(emptyIndexFile)
return rawData return rawData
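
To make the merge above concrete, here is a minimal, standalone sketch of the same idea. It deliberately uses simplified stand-in types instead of the real helm_repo ones, and the names chartVersion, indexFile and mergeNamespace are illustrative only: entries from a namespace-local index are re-keyed as "<namespace>/<chart>" and their URLs are prefixed with the namespace before landing in the merged index.

package main

import (
    "fmt"
    "path"
)

// Simplified stand-ins for the Helm index types.
type chartVersion struct {
    Name string
    URLs []string
}

type indexFile struct {
    Entries map[string][]*chartVersion
}

// mergeNamespace re-keys every chart entry as "<namespace>/<chart>" and
// prefixes its URLs with the namespace, mirroring mergeIndexFile above.
func mergeNamespace(ns string, base, merging *indexFile) {
    for name, versions := range merging.Entries {
        nameWithNS := fmt.Sprintf("%s/%s", ns, name)
        for _, v := range versions {
            v.Name = nameWithNS
            for i, u := range v.URLs {
                v.URLs[i] = path.Join(ns, u)
            }
        }
        base.Entries[nameWithNS] = versions
    }
}

func main() {
    base := &indexFile{Entries: map[string][]*chartVersion{}}
    merging := &indexFile{Entries: map[string][]*chartVersion{
        "nginx": {{Name: "nginx", URLs: []string{"charts/nginx-1.0.0.tgz"}}},
    }}
    mergeNamespace("library", base, merging)
    fmt.Println(base.Entries["library/nginx"][0].URLs[0]) // library/charts/nginx-1.0.0.tgz
}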

View File

@ -19,17 +19,17 @@ const (
contentLengthHeader = "Content-Length" contentLengthHeader = "Content-Length"
) )
//ProxyEngine is used to proxy the related traffic // ProxyEngine is used to proxy the related traffic
type ProxyEngine struct { type ProxyEngine struct {
//The backend target server the traffic will be forwarded to // The backend target server the traffic will be forwarded to
//Just in case we'll use it // Just in case we'll use it
backend *url.URL backend *url.URL
//Use go reverse proxy as engine // Use go reverse proxy as engine
engine *httputil.ReverseProxy engine *httputil.ReverseProxy
} }
//NewProxyEngine is the constructor of ProxyEngine // NewProxyEngine is the constructor of ProxyEngine
func NewProxyEngine(target *url.URL, cred *Credential) *ProxyEngine { func NewProxyEngine(target *url.URL, cred *Credential) *ProxyEngine {
return &ProxyEngine{ return &ProxyEngine{
backend: target, backend: target,
@ -43,17 +43,17 @@ func NewProxyEngine(target *url.URL, cred *Credential) *ProxyEngine {
} }
} }
//ServeHTTP serves the incoming http requests // ServeHTTP serves the incoming http requests
func (pe *ProxyEngine) ServeHTTP(w http.ResponseWriter, req *http.Request) { func (pe *ProxyEngine) ServeHTTP(w http.ResponseWriter, req *http.Request) {
pe.engine.ServeHTTP(w, req) pe.engine.ServeHTTP(w, req)
} }
//Overwrite the http requests // Overwrite the http requests
func director(target *url.URL, cred *Credential, req *http.Request) { func director(target *url.URL, cred *Credential, req *http.Request) {
//Closure // Closure
targetQuery := target.RawQuery targetQuery := target.RawQuery
//Overwrite the request URL to the target path // Overwrite the request URL to the target path
req.URL.Scheme = target.Scheme req.URL.Scheme = target.Scheme
req.URL.Host = target.Host req.URL.Host = target.Host
req.URL.Path = singleJoiningSlash(target.Path, req.URL.Path) req.URL.Path = singleJoiningSlash(target.Path, req.URL.Path)
@ -66,28 +66,28 @@ func director(target *url.URL, cred *Credential, req *http.Request) {
req.Header.Set("User-Agent", agentHarbor) req.Header.Set("User-Agent", agentHarbor)
} }
//Add the authentication header if it exists // Add the authentication header if it exists
if cred != nil { if cred != nil {
req.SetBasicAuth(cred.Username, cred.Password) req.SetBasicAuth(cred.Username, cred.Password)
} }
} }
//Modify the http response // Modify the http response
func modifyResponse(res *http.Response) error { func modifyResponse(res *http.Response) error {
//Accept cases // Accept cases
//Success or redirect // Success or redirect
if res.StatusCode >= http.StatusOK && res.StatusCode <= http.StatusTemporaryRedirect { if res.StatusCode >= http.StatusOK && res.StatusCode <= http.StatusTemporaryRedirect {
return nil return nil
} }
//Detect the 401 code and, if found, overwrite it to 500. // Detect the 401 code and, if found, overwrite it to 500.
//We also re-write the error content to a structural error object // We also re-write the error content to a structural error object
errorObj := make(map[string]string) errorObj := make(map[string]string)
if res.StatusCode == http.StatusUnauthorized { if res.StatusCode == http.StatusUnauthorized {
errorObj["error"] = "operation request from unauthorized source is rejected" errorObj["error"] = "operation request from unauthorized source is rejected"
res.StatusCode = http.StatusInternalServerError res.StatusCode = http.StatusInternalServerError
} else { } else {
//Extract the error and wrap it into the error object // Extract the error and wrap it into the error object
data, err := ioutil.ReadAll(res.Body) data, err := ioutil.ReadAll(res.Body)
if err != nil { if err != nil {
errorObj["error"] = fmt.Sprintf("%s: %s", res.Status, err.Error()) errorObj["error"] = fmt.Sprintf("%s: %s", res.Status, err.Error())
@ -112,8 +112,8 @@ func modifyResponse(res *http.Response) error {
return nil return nil
} }
//Join the path // Join the path
//Copy from the go reverse proxy // Copy from the go reverse proxy
func singleJoiningSlash(a, b string) string { func singleJoiningSlash(a, b string) string {
aslash := strings.HasSuffix(a, "/") aslash := strings.HasSuffix(a, "/")
bslash := strings.HasPrefix(b, "/") bslash := strings.HasPrefix(b, "/")
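
The proxy in this file is built on the standard library's httputil.ReverseProxy, customized through its Director and ModifyResponse hooks. A rough sketch of that wiring is below; the backend address and credentials are placeholders, and the path joining and header handling are simplified relative to the code above.

package main

import (
    "log"
    "net/http"
    "net/http/httputil"
    "net/url"
)

func main() {
    // Placeholder backend; point it at the real chart repository server.
    backend, err := url.Parse("http://chartmuseum:8080")
    if err != nil {
        log.Fatal(err)
    }

    proxy := &httputil.ReverseProxy{
        // Director rewrites each incoming request to target the backend,
        // roughly what director() above does (scheme, host, path, basic auth).
        Director: func(req *http.Request) {
            req.URL.Scheme = backend.Scheme
            req.URL.Host = backend.Host
            req.Header.Set("User-Agent", "harbor-ui") // placeholder agent string
            req.SetBasicAuth("user", "password")      // placeholder credential
        },
        // ModifyResponse rewrites the backend response before it reaches the
        // client, e.g. mapping 401 to 500 as modifyResponse() above does.
        ModifyResponse: func(res *http.Response) error {
            if res.StatusCode == http.StatusUnauthorized {
                res.StatusCode = http.StatusInternalServerError
            }
            return nil
        },
    }

    log.Fatal(http.ListenAndServe(":9999", proxy))
}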

View File

@ -14,19 +14,19 @@ const (
maxDeletionThreads = 10 maxDeletionThreads = 10
) )
//UtilityHandler provides utility methods // UtilityHandler provides utility methods
type UtilityHandler struct { type UtilityHandler struct {
//Parse and process the chart version to provide required info data // Parse and process the chart version to provide required info data
chartOperator *ChartOperator chartOperator *ChartOperator
//HTTP client used to call the related APIs of the backend chart repositories // HTTP client used to call the related APIs of the backend chart repositories
apiClient *ChartClient apiClient *ChartClient
//Point to the url of the backend server // Point to the url of the backend server
backendServerAddress *url.URL backendServerAddress *url.URL
} }
//GetChartsByNs gets the chart list under the namespace // GetChartsByNs gets the chart list under the namespace
func (uh *UtilityHandler) GetChartsByNs(namespace string) ([]*ChartInfo, error) { func (uh *UtilityHandler) GetChartsByNs(namespace string) ([]*ChartInfo, error) {
if len(strings.TrimSpace(namespace)) == 0 { if len(strings.TrimSpace(namespace)) == 0 {
return nil, errors.New("empty namespace when getting chart list") return nil, errors.New("empty namespace when getting chart list")
@ -43,7 +43,7 @@ func (uh *UtilityHandler) GetChartsByNs(namespace string) ([]*ChartInfo, error)
return uh.chartOperator.GetChartList(content) return uh.chartOperator.GetChartList(content)
} }
//DeleteChart deletes all the chart versions of the specified chart under the namespace. // DeleteChart deletes all the chart versions of the specified chart under the namespace.
func (uh *UtilityHandler) DeleteChart(namespace, chartName string) error { func (uh *UtilityHandler) DeleteChart(namespace, chartName string) error {
if len(strings.TrimSpace(namespace)) == 0 { if len(strings.TrimSpace(namespace)) == 0 {
return errors.New("empty namespace when deleting chart") return errors.New("empty namespace when deleting chart")
@ -66,8 +66,8 @@ func (uh *UtilityHandler) DeleteChart(namespace, chartName string) error {
return err return err
} }
//Let's delete the versions in parallel // Let's delete the versions in parallel
//The number of goroutine is controlled by the const maxDeletionThreads // The number of goroutine is controlled by the const maxDeletionThreads
qSize := len(allVersions) qSize := len(allVersions)
if qSize > maxDeletionThreads { if qSize > maxDeletionThreads {
qSize = maxDeletionThreads qSize = maxDeletionThreads
@ -77,17 +77,17 @@ func (uh *UtilityHandler) DeleteChart(namespace, chartName string) error {
waitGroup := new(sync.WaitGroup) waitGroup := new(sync.WaitGroup)
waitGroup.Add(len(allVersions)) waitGroup.Add(len(allVersions))
//Append initial tokens // Append initial tokens
for i := 0; i < qSize; i++ { for i := 0; i < qSize; i++ {
tokenQueue <- struct{}{} tokenQueue <- struct{}{}
} }
//Collect errors // Collect errors
errs := make([]error, 0) errs := make([]error, 0)
errWrapper := make(chan error, 1) errWrapper := make(chan error, 1)
go func() { go func() {
defer func() { defer func() {
//pass to the out func // pass to the out func
if len(errs) > 0 { if len(errs) > 0 {
errWrapper <- fmt.Errorf("%v", errs) errWrapper <- fmt.Errorf("%v", errs)
} }
@ -99,19 +99,19 @@ func (uh *UtilityHandler) DeleteChart(namespace, chartName string) error {
} }
}() }()
//Schedule deletion tasks // Schedule deletion tasks
for _, deletingVersion := range allVersions { for _, deletingVersion := range allVersions {
//Apply for a token first // Apply for a token first
//If no token is available, block here // If no token is available, block here
<-tokenQueue <-tokenQueue
//Got one token // Got one token
go func(deletingVersion *helm_repo.ChartVersion) { go func(deletingVersion *helm_repo.ChartVersion) {
defer func() { defer func() {
//return the token back // return the token back
tokenQueue <- struct{}{} tokenQueue <- struct{}{}
//done // done
waitGroup.Done() waitGroup.Done()
}() }()
@ -121,9 +121,9 @@ func (uh *UtilityHandler) DeleteChart(namespace, chartName string) error {
}(deletingVersion) }(deletingVersion)
} }
//Wait until all goroutines are done // Wait until all goroutines are done
waitGroup.Wait() waitGroup.Wait()
//Safe to quit error collection goroutine // Safe to quit error collection goroutine
close(errChan) close(errChan)
err = <-errWrapper err = <-errWrapper
@ -131,7 +131,7 @@ func (uh *UtilityHandler) DeleteChart(namespace, chartName string) error {
return err return err
} }
//deleteChartVersion deletes the specified chart version // deleteChartVersion deletes the specified chart version
func (uh *UtilityHandler) deleteChartVersion(namespace, chartName, version string) error { func (uh *UtilityHandler) deleteChartVersion(namespace, chartName, version string) error {
path := fmt.Sprintf("/api/%s/charts/%s/%s", namespace, chartName, version) path := fmt.Sprintf("/api/%s/charts/%s/%s", namespace, chartName, version)
url := fmt.Sprintf("%s%s", uh.backendServerAddress.String(), path) url := fmt.Sprintf("%s%s", uh.backendServerAddress.String(), path)
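
The deletion logic above is an instance of the token-queue pattern: a buffered channel caps how many goroutines run at once while a WaitGroup waits for all of them to finish. A self-contained sketch, with a print standing in for the real HTTP DELETE:

package main

import (
    "fmt"
    "sync"
)

func main() {
    versions := []string{"1.0.0", "1.0.1", "1.1.0", "2.0.0", "2.1.0"}

    const maxWorkers = 2 // stands in for maxDeletionThreads
    tokens := make(chan struct{}, maxWorkers)
    for i := 0; i < maxWorkers; i++ {
        tokens <- struct{}{}
    }

    var wg sync.WaitGroup
    wg.Add(len(versions))
    for _, v := range versions {
        <-tokens // block until a token is available
        go func(version string) {
            defer func() {
                tokens <- struct{}{} // return the token
                wg.Done()
            }()
            fmt.Println("deleting version", version) // stand-in for the HTTP DELETE call
        }(v)
    }
    wg.Wait()
}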

View File

@ -7,7 +7,7 @@ import (
"testing" "testing"
) )
//TestGetChartsByNs tests GetChartsByNs method in UtilityHandler // TestGetChartsByNs tests GetChartsByNs method in UtilityHandler
func TestGetChartsByNs(t *testing.T) { func TestGetChartsByNs(t *testing.T) {
mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.RequestURI { switch r.RequestURI {
@ -43,7 +43,7 @@ func TestGetChartsByNs(t *testing.T) {
} }
} }
//Test the function DeleteChart // Test the function DeleteChart
func TestDeleteChart(t *testing.T) { func TestDeleteChart(t *testing.T) {
mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.RequestURI { switch r.RequestURI {

View File

@ -14,7 +14,7 @@ const (
contentTypeJSON = "application/json" contentTypeJSON = "application/json"
) )
//WriteError writes error to http client // WriteError writes error to http client
func WriteError(w http.ResponseWriter, code int, err error) { func WriteError(w http.ResponseWriter, code int, err error) {
errorObj := make(map[string]string) errorObj := make(map[string]string)
errorObj["error"] = err.Error() errorObj["error"] = err.Error()
@ -26,20 +26,20 @@ func WriteError(w http.ResponseWriter, code int, err error) {
w.Write(errorContent) w.Write(errorContent)
} }
//WriteInternalError writes error with statusCode == 500 // WriteInternalError writes error with statusCode == 500
func WriteInternalError(w http.ResponseWriter, err error) { func WriteInternalError(w http.ResponseWriter, err error) {
WriteError(w, http.StatusInternalServerError, err) WriteError(w, http.StatusInternalServerError, err)
} }
//Write JSON data to http client // Write JSON data to http client
func writeJSONData(w http.ResponseWriter, data []byte) { func writeJSONData(w http.ResponseWriter, data []byte) {
w.Header().Set(contentTypeHeader, contentTypeJSON) w.Header().Set(contentTypeHeader, contentTypeJSON)
w.WriteHeader(http.StatusOK) w.WriteHeader(http.StatusOK)
w.Write(data) w.Write(data)
} }
//Extract the error object '{"error": "****---***"}' from the content if it exists // Extract the error object '{"error": "****---***"}' from the content if it exists
//a nil error will be returned if it does not exist // a nil error will be returned if it does not exist
func extractError(content []byte) error { func extractError(content []byte) error {
if len(content) == 0 { if len(content) == 0 {
return nil return nil
@ -58,8 +58,8 @@ func extractError(content []byte) error {
return nil return nil
} }
//Parse the redis configuration to the beego cache pattern // Parse the redis configuration to the beego cache pattern
//Config pattern is "address:port[,weight,password,db_index]" // Config pattern is "address:port[,weight,password,db_index]"
func parseRedisConfig(redisConfigV string) (string, error) { func parseRedisConfig(redisConfigV string) (string, error) {
if len(redisConfigV) == 0 { if len(redisConfigV) == 0 {
return "", errors.New("empty redis config") return "", errors.New("empty redis config")
@ -68,47 +68,47 @@ func parseRedisConfig(redisConfigV string) (string, error) {
redisConfig := make(map[string]string) redisConfig := make(map[string]string)
redisConfig["key"] = cacheCollectionName redisConfig["key"] = cacheCollectionName
//Try best to parse the configuration segments. // Try best to parse the configuration segments.
//If the related parts are missing, assign default value. // If the related parts are missing, assign default value.
//The default database index for UI process is 0. // The default database index for UI process is 0.
configSegments := strings.Split(redisConfigV, ",") configSegments := strings.Split(redisConfigV, ",")
for i, segment := range configSegments { for i, segment := range configSegments {
if i > 3 { if i > 3 {
//ignore useless segments // ignore useless segments
break break
} }
switch i { switch i {
//address:port // address:port
case 0: case 0:
redisConfig["conn"] = segment redisConfig["conn"] = segment
//password, may not exist // password, may not exist
case 2: case 2:
redisConfig["password"] = segment redisConfig["password"] = segment
//database index, may not exist // database index, may not exist
case 3: case 3:
redisConfig["dbNum"] = segment redisConfig["dbNum"] = segment
} }
} }
//Assign default value // Assign default value
if len(redisConfig["dbNum"]) == 0 { if len(redisConfig["dbNum"]) == 0 {
redisConfig["dbNum"] = "0" redisConfig["dbNum"] = "0"
} }
//Try to validate the connection address // Try to validate the connection address
fullAddr := redisConfig["conn"] fullAddr := redisConfig["conn"]
if strings.Index(fullAddr, "://") == -1 { if strings.Index(fullAddr, "://") == -1 {
//Append schema // Append schema
fullAddr = fmt.Sprintf("redis://%s", fullAddr) fullAddr = fmt.Sprintf("redis://%s", fullAddr)
} }
//Validate it by url // Validate it by url
_, err := url.Parse(fullAddr) _, err := url.Parse(fullAddr)
if err != nil { if err != nil {
return "", err return "", err
} }
//Convert config map to string // Convert config map to string
cfgData, err := json.Marshal(redisConfig) cfgData, err := json.Marshal(redisConfig)
if err != nil { if err != nil {
return "", err return "", err

View File

@ -5,21 +5,21 @@ import (
"testing" "testing"
) )
//Test the utility function parseRedisConfig // Test the utility function parseRedisConfig
func TestParseRedisConfig(t *testing.T) { func TestParseRedisConfig(t *testing.T) {
//Case 1: empty addr // Case 1: empty addr
redisAddr := "" redisAddr := ""
if _, err := parseRedisConfig(redisAddr); err == nil { if _, err := parseRedisConfig(redisAddr); err == nil {
t.Fatal("expect non nil error but got nil one if addr is empty") t.Fatal("expect non nil error but got nil one if addr is empty")
} }
//Case 2: short pattern, addr:port // Case 2: short pattern, addr:port
redisAddr = "redis:6379" redisAddr = "redis:6379"
if parsedConnStr, err := parseRedisConfig(redisAddr); err != nil { if parsedConnStr, err := parseRedisConfig(redisAddr); err != nil {
t.Fatalf("expect nil error but got non nil one if addr is short pattern: %s\n", parsedConnStr) t.Fatalf("expect nil error but got non nil one if addr is short pattern: %s\n", parsedConnStr)
} }
//Case 3: long pattern but miss some parts // Case 3: long pattern but miss some parts
redisAddr = "redis:6379,100" redisAddr = "redis:6379,100"
if parsedConnStr, err := parseRedisConfig(redisAddr); err != nil { if parsedConnStr, err := parseRedisConfig(redisAddr); err != nil {
t.Fatalf("expect nil error but got non nil one if addr is long pattern with some parts missing: %s\n", parsedConnStr) t.Fatalf("expect nil error but got non nil one if addr is long pattern with some parts missing: %s\n", parsedConnStr)
@ -29,7 +29,7 @@ func TestParseRedisConfig(t *testing.T) {
} }
} }
//Case 4: long pattern // Case 4: long pattern
redisAddr = "redis:6379,100,Passw0rd,1" redisAddr = "redis:6379,100,Passw0rd,1"
if parsedConnStr, err := parseRedisConfig(redisAddr); err != nil { if parsedConnStr, err := parseRedisConfig(redisAddr); err != nil {
t.Fatal("expect nil error but got non nil one if addr is long pattern") t.Fatal("expect nil error but got non nil one if addr is long pattern")

View File

@ -154,7 +154,7 @@ var (
ReadOnly, ReadOnly,
} }
//value is default value // value is default value
HarborStringKeysMap = map[string]string{ HarborStringKeysMap = map[string]string{
AUTHMode: "db_auth", AUTHMode: "db_auth",
LDAPURL: "", LDAPURL: "",

View File

@ -38,7 +38,7 @@ func GetTotalOfAccessLogs(query *models.LogQueryParam) (int64, error) {
return logQueryConditions(query).Count() return logQueryConditions(query).Count()
} }
//GetAccessLogs gets access logs according to different conditions // GetAccessLogs gets access logs according to different conditions
func GetAccessLogs(query *models.LogQueryParam) ([]models.AccessLog, error) { func GetAccessLogs(query *models.LogQueryParam) ([]models.AccessLog, error) {
qs := logQueryConditions(query).OrderBy("-op_time") qs := logQueryConditions(query).OrderBy("-op_time")

View File

@ -131,7 +131,7 @@ func ClearTable(table string) error {
if table == models.UserTable { if table == models.UserTable {
sql = fmt.Sprintf("delete from %s where user_id > 2", table) sql = fmt.Sprintf("delete from %s where user_id > 2", table)
} }
if table == "project_metadata" { //make sure library is public if table == "project_metadata" { // make sure library is public
sql = fmt.Sprintf("delete from %s where id > 1", table) sql = fmt.Sprintf("delete from %s where id > 1", table)
} }
_, err := o.Raw(sql).Exec() _, err := o.Raw(sql).Exec()
@ -152,7 +152,7 @@ func paginateForQuerySetter(qs orm.QuerySeter, page, size int64) orm.QuerySeter
return qs return qs
} }
//Escape .. // Escape ..
func Escape(str string) string { func Escape(str string) string {
str = strings.Replace(str, `%`, `\%`, -1) str = strings.Replace(str, `%`, `\%`, -1)
str = strings.Replace(str, `_`, `\_`, -1) str = strings.Replace(str, `_`, `\_`, -1)

View File

@ -21,7 +21,7 @@ import (
"time" "time"
) )
//SetClairVulnTimestamp updates the last_update of a namespace. If there's no record for this namespace, one will be created. // SetClairVulnTimestamp updates the last_update of a namespace. If there's no record for this namespace, one will be created.
func SetClairVulnTimestamp(namespace string, timestamp time.Time) error { func SetClairVulnTimestamp(namespace string, timestamp time.Time) error {
o := GetOrmer() o := GetOrmer()
rec := &models.ClairVulnTimestamp{ rec := &models.ClairVulnTimestamp{
@ -43,7 +43,7 @@ func SetClairVulnTimestamp(namespace string, timestamp time.Time) error {
return nil return nil
} }
//ListClairVulnTimestamps returns a list of all records in the vuln timestamp table. // ListClairVulnTimestamps returns a list of all records in the vuln timestamp table.
func ListClairVulnTimestamps() ([]*models.ClairVulnTimestamp, error) { func ListClairVulnTimestamps() ([]*models.ClairVulnTimestamp, error) {
var res []*models.ClairVulnTimestamp var res []*models.ClairVulnTimestamp
o := GetOrmer() o := GetOrmer()

View File

@ -32,7 +32,7 @@ var (
once sync.Once once sync.Once
) )
//GetOrmer return the singleton of Ormer for clair DB. // GetOrmer return the singleton of Ormer for clair DB.
func GetOrmer() orm.Ormer { func GetOrmer() orm.Ormer {
once.Do(func() { once.Do(func() {
dbInstance, err := orm.GetDB(dao.ClairDBAlias) dbInstance, err := orm.GetDB(dao.ClairDBAlias)
@ -47,7 +47,7 @@ func GetOrmer() orm.Ormer {
return ormer return ormer
} }
//GetLastUpdate queries the table `keyvalue` in Clair's DB and returns the value of `updater/last` // GetLastUpdate queries the table `keyvalue` in Clair's DB and returns the value of `updater/last`
func GetLastUpdate() (int64, error) { func GetLastUpdate() (int64, error) {
var list orm.ParamsList var list orm.ParamsList
num, err := GetOrmer().Raw("SELECT value from keyvalue where key=?", updaterLast).ValuesFlat(&list) num, err := GetOrmer().Raw("SELECT value from keyvalue where key=?", updaterLast).ValuesFlat(&list)
@ -60,7 +60,7 @@ func GetLastUpdate() (int64, error) {
return 0, fmt.Errorf("The value: %v, is non-string", list[0]) return 0, fmt.Errorf("The value: %v, is non-string", list[0])
} }
res, err := strconv.ParseInt(s, 0, 64) res, err := strconv.ParseInt(s, 0, 64)
if err != nil { //shouldn't be here. if err != nil { // shouldn't be here.
return 0, err return 0, err
} }
return res, nil return res, nil
@ -68,6 +68,6 @@ func GetLastUpdate() (int64, error) {
if num > 1 { if num > 1 {
return 0, fmt.Errorf("Multiple entries for %s in Clair DB", updaterLast) return 0, fmt.Errorf("Multiple entries for %s in Clair DB", updaterLast)
} }
//num is zero, it's not updated yet. // num is zero, it's not updated yet.
return 0, nil return 0, nil
} }

View File

@ -187,7 +187,7 @@ func TestRegister(t *testing.T) {
t.Errorf("Error occurred in Register: %v", err) t.Errorf("Error occurred in Register: %v", err)
} }
//Check if user registered successfully. // Check if user registered successfully.
queryUser := models.User{ queryUser := models.User{
Username: username, Username: username,
} }
@ -567,7 +567,7 @@ func TestGetUserProjectRoles(t *testing.T) {
t.Errorf("Error happened in GetUserProjectRole: %v, userID: %+v, project Id: %d", err, currentUser.UserID, currentProject.ProjectID) t.Errorf("Error happened in GetUserProjectRole: %v, userID: %+v, project Id: %d", err, currentUser.UserID, currentProject.ProjectID)
} }
//Get the size of current user project role. // Get the size of current user project role.
if len(r) != 1 { if len(r) != 1 {
t.Errorf("The user, id: %d, should only have one role in project, id: %d, but actual: %d", currentUser.UserID, currentProject.ProjectID, len(r)) t.Errorf("The user, id: %d, should only have one role in project, id: %d, but actual: %d", currentUser.UserID, currentProject.ProjectID, len(r))
} }
@ -675,7 +675,7 @@ func TestAddRepTarget(t *testing.T) {
Username: "admin", Username: "admin",
Password: "admin", Password: "admin",
} }
//_, err := AddRepTarget(target) // _, err := AddRepTarget(target)
id, err := AddRepTarget(target) id, err := AddRepTarget(target)
t.Logf("added target, id: %d", id) t.Logf("added target, id: %d", id)
if err != nil { if err != nil {

View File

@ -91,7 +91,7 @@ func DeleteUserGroup(id int) error {
o := dao.GetOrmer() o := dao.GetOrmer()
_, err := o.Delete(&userGroup) _, err := o.Delete(&userGroup)
if err == nil { if err == nil {
//Delete all related project members // Delete all related project members
sql := `delete from project_member where entity_id = ? and entity_type='g'` sql := `delete from project_member where entity_id = ? and entity_type='g'`
_, err := o.Raw(sql, id).Exec() _, err := o.Raw(sql, id).Exec()
if err != nil { if err != nil {
@ -147,7 +147,7 @@ func GetGroupDNQueryCondition(userGroupList []*models.UserGroup) string {
count++ count++
} }
} }
//No LDAP Group found // No LDAP Group found
if count == 0 { if count == 0 {
return "" return ""
} }

View File

@ -30,7 +30,7 @@ var createdUserGroupID int
func TestMain(m *testing.M) { func TestMain(m *testing.M) {
//databases := []string{"mysql", "sqlite"} // databases := []string{"mysql", "sqlite"}
databases := []string{"postgresql"} databases := []string{"postgresql"}
for _, database := range databases { for _, database := range databases {
log.Infof("run test cases for database: %s", database) log.Infof("run test cases for database: %s", database)
@ -43,7 +43,7 @@ func TestMain(m *testing.M) {
log.Fatalf("invalid database: %s", database) log.Fatalf("invalid database: %s", database)
} }
//Extract to test utils // Extract to test utils
initSqls := []string{ initSqls := []string{
"insert into harbor_user (username, email, password, realname) values ('member_test_01', 'member_test_01@example.com', '123456', 'member_test_01')", "insert into harbor_user (username, email, password, realname) values ('member_test_01', 'member_test_01@example.com', '123456', 'member_test_01')",
"insert into project (name, owner_id) values ('member_test_01', 1)", "insert into project (name, owner_id) values ('member_test_01', 1)",

View File

@ -19,7 +19,7 @@ import (
"time" "time"
"github.com/astaxie/beego/orm" "github.com/astaxie/beego/orm"
_ "github.com/go-sql-driver/mysql" //register mysql driver _ "github.com/go-sql-driver/mysql" // register mysql driver
"github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/common/utils"
) )

View File

@ -20,12 +20,12 @@ import (
"github.com/astaxie/beego/orm" "github.com/astaxie/beego/orm"
"github.com/golang-migrate/migrate" "github.com/golang-migrate/migrate"
_ "github.com/golang-migrate/migrate/database/postgres" //import pgsql driver for migrator _ "github.com/golang-migrate/migrate/database/postgres" // import pgsql driver for migrator
_ "github.com/golang-migrate/migrate/source/file" // import local file driver for migrator _ "github.com/golang-migrate/migrate/source/file" // import local file driver for migrator
"github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/common/utils"
"github.com/goharbor/harbor/src/common/utils/log" "github.com/goharbor/harbor/src/common/utils/log"
_ "github.com/lib/pq" //register pgsql driver _ "github.com/lib/pq" // register pgsql driver
) )
const defaultMigrationPath = "migrations/postgresql/" const defaultMigrationPath = "migrations/postgresql/"
@ -71,7 +71,7 @@ func NewPGSQL(host string, port string, usr string, pwd string, database string,
} }
} }
//Register registers pgSQL to orm with the info wrapped by the instance. // Register registers pgSQL to orm with the info wrapped by the instance.
func (p *pgsql) Register(alias ...string) error { func (p *pgsql) Register(alias ...string) error {
if err := utils.TestTCPConn(fmt.Sprintf("%s:%s", p.host, p.port), 60, 2); err != nil { if err := utils.TestTCPConn(fmt.Sprintf("%s:%s", p.host, p.port), 60, 2); err != nil {
return err return err
@ -91,10 +91,10 @@ func (p *pgsql) Register(alias ...string) error {
return orm.RegisterDataBase(an, "postgres", info) return orm.RegisterDataBase(an, "postgres", info)
} }
//UpgradeSchema calls migrate tool to upgrade schema to the latest based on the SQL scripts. // UpgradeSchema calls migrate tool to upgrade schema to the latest based on the SQL scripts.
func (p *pgsql) UpgradeSchema() error { func (p *pgsql) UpgradeSchema() error {
dbURL := fmt.Sprintf("postgres://%s:%s@%s:%s/%s?sslmode=%s", p.usr, p.pwd, p.host, p.port, p.database, pgsqlSSLMode(p.sslmode)) dbURL := fmt.Sprintf("postgres://%s:%s@%s:%s/%s?sslmode=%s", p.usr, p.pwd, p.host, p.port, p.database, pgsqlSSLMode(p.sslmode))
//For UT // For UT
path := os.Getenv("POSTGRES_MIGRATION_SCRIPTS_PATH") path := os.Getenv("POSTGRES_MIGRATION_SCRIPTS_PATH")
if len(path) == 0 { if len(path) == 0 {
path = defaultMigrationPath path = defaultMigrationPath
@ -114,7 +114,7 @@ func (p *pgsql) UpgradeSchema() error {
err = m.Up() err = m.Up()
if err == migrate.ErrNoChange { if err == migrate.ErrNoChange {
log.Infof("No change in schema, skip.") log.Infof("No change in schema, skip.")
} else if err != nil { //migrate.ErrLockTimeout will be thrown when another process is doing migration and timeout. } else if err != nil { // migrate.ErrLockTimeout will be thrown when another process is doing migration and timeout.
log.Errorf("Failed to upgrade schema, error: %q", err) log.Errorf("Failed to upgrade schema, error: %q", err)
return err return err
} }
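
UpgradeSchema above delegates the actual work to golang-migrate. A minimal standalone sketch of that flow, with a placeholder connection URL and script path, looks roughly like this:

package main

import (
    "log"

    "github.com/golang-migrate/migrate"
    _ "github.com/golang-migrate/migrate/database/postgres" // pgsql driver for the migrator
    _ "github.com/golang-migrate/migrate/source/file"       // local file source for the migrator
)

func main() {
    // Placeholder URL and script path; adjust credentials and location.
    dbURL := "postgres://postgres:password@127.0.0.1:5432/registry?sslmode=disable"
    m, err := migrate.New("file://migrations/postgresql/", dbURL)
    if err != nil {
        log.Fatalf("failed to create the migrator: %v", err)
    }
    defer m.Close()

    // Up applies every pending migration; ErrNoChange just means the schema
    // is already at the latest version.
    if err := m.Up(); err != nil && err != migrate.ErrNoChange {
        log.Fatalf("failed to upgrade schema: %v", err)
    }
    log.Println("schema is up to date")
}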

View File

@ -292,8 +292,8 @@ func GetRolesByLDAPGroup(projectID int64, groupDNCondition string) ([]int, error
return roles, nil return roles, nil
} }
o := GetOrmer() o := GetOrmer()
//Because an LDAP user can be a member of multiple groups, // Because an LDAP user can be a member of multiple groups,
//the role IDs are in descending privilege order (1-admin, 2-developer, 3-guest), so use min to select the most privileged role. // the role IDs are in descending privilege order (1-admin, 2-developer, 3-guest), so use min to select the most privileged role.
sql := fmt.Sprintf( sql := fmt.Sprintf(
`select min(pm.role) from project_member pm `select min(pm.role) from project_member pm
left join user_group ug on pm.entity_type = 'g' and pm.entity_id = ug.id left join user_group ug on pm.entity_type = 'g' and pm.entity_id = ug.id
@ -304,7 +304,7 @@ func GetRolesByLDAPGroup(projectID int64, groupDNCondition string) ([]int, error
log.Warningf("Error in GetRolesByLDAPGroup, error: %v", err) log.Warningf("Error in GetRolesByLDAPGroup, error: %v", err)
return nil, err return nil, err
} }
//If no row is selected, min returns an empty row; this check avoids returning 0 as a role // If no row is selected, min returns an empty row; this check avoids returning 0 as a role
if len(roles) == 1 && roles[0] == 0 { if len(roles) == 1 && roles[0] == 0 {
return []int{}, nil return []int{}, nil
} }
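
To see why min() is the right aggregate in the query above, a toy walk-through: role IDs are ordered by privilege (1=admin, 2=developer, 3=guest), so the smallest ID across all matching group memberships is the most privileged role, and a zero result simply means no rows matched.

package main

import "fmt"

func main() {
    groupRoles := []int{3, 2} // guest via one LDAP group, developer via another

    best := 0
    for _, r := range groupRoles {
        if best == 0 || r < best {
            best = r
        }
    }

    if best == 0 {
        fmt.Println("no membership found") // mirrors the empty-row guard above
        return
    }
    fmt.Println("effective role id:", best) // 2 -> developer wins
}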

View File

@ -30,7 +30,7 @@ import (
func TestMain(m *testing.M) { func TestMain(m *testing.M) {
//databases := []string{"mysql", "sqlite"} // databases := []string{"mysql", "sqlite"}
databases := []string{"postgresql"} databases := []string{"postgresql"}
for _, database := range databases { for _, database := range databases {
log.Infof("run test cases for database: %s", database) log.Infof("run test cases for database: %s", database)
@ -43,7 +43,7 @@ func TestMain(m *testing.M) {
log.Fatalf("invalid database: %s", database) log.Fatalf("invalid database: %s", database)
} }
//Extract to test utils // Extract to test utils
initSqls := []string{ initSqls := []string{
"insert into harbor_user (username, email, password, realname) values ('member_test_01', 'member_test_01@example.com', '123456', 'member_test_01')", "insert into harbor_user (username, email, password, realname) values ('member_test_01', 'member_test_01@example.com', '123456', 'member_test_01')",
"insert into project (name, owner_id) values ('member_test_01', 1)", "insert into project (name, owner_id) values ('member_test_01', 1)",

View File

@ -91,13 +91,13 @@ func IncreasePullCount(name string) (err error) {
return nil return nil
} }
//RepositoryExists returns whether the repository exists according to its name. // RepositoryExists returns whether the repository exists according to its name.
func RepositoryExists(name string) bool { func RepositoryExists(name string) bool {
o := GetOrmer() o := GetOrmer()
return o.QueryTable("repository").Filter("name", name).Exist() return o.QueryTable("repository").Filter("name", name).Exist()
} }
//GetTopRepos returns the most popular repositories whose project ID is // GetTopRepos returns the most popular repositories whose project ID is
// in projectIDs // in projectIDs
func GetTopRepos(projectIDs []int64, n int) ([]*models.RepoRecord, error) { func GetTopRepos(projectIDs []int64, n int) ([]*models.RepoRecord, error) {
repositories := []*models.RepoRecord{} repositories := []*models.RepoRecord{}

View File

@ -18,7 +18,7 @@ import (
"fmt" "fmt"
"github.com/astaxie/beego/orm" "github.com/astaxie/beego/orm"
_ "github.com/mattn/go-sqlite3" //register sqlite driver _ "github.com/mattn/go-sqlite3" // register sqlite driver
) )
type sqlite struct { type sqlite struct {

View File

@ -94,7 +94,7 @@ func LoginByDb(auth models.AuthModel) (*models.User, error) {
return nil, nil return nil, nil
} }
user.Password = "" //do not return the password user.Password = "" // do not return the password
return &user, nil return &user, nil
} }
@ -244,7 +244,7 @@ func OnBoardUser(u *models.User) error {
return nil return nil
} }
//IsSuperUser checks if the user is the super user (conventionally id == 1) of Harbor // IsSuperUser checks if the user is the super user (conventionally id == 1) of Harbor
func IsSuperUser(username string) bool { func IsSuperUser(username string) bool {
u, err := GetUser(models.User{ u, err := GetUser(models.User{
Username: username, Username: username,
@ -257,7 +257,7 @@ func IsSuperUser(username string) bool {
return u != nil && u.UserID == 1 return u != nil && u.UserID == 1
} }
//CleanUser - Clean this user information from DB // CleanUser - Clean this user information from DB
func CleanUser(id int64) error { func CleanUser(id int64) error {
if _, err := GetOrmer().QueryTable(&models.User{}). if _, err := GetOrmer().QueryTable(&models.User{}).
Filter("UserID", id).Delete(); err != nil { Filter("UserID", id).Delete(); err != nil {

View File

@ -17,7 +17,7 @@ type Client interface {
SubmitJob(*models.JobData) (string, error) SubmitJob(*models.JobData) (string, error)
GetJobLog(uuid string) ([]byte, error) GetJobLog(uuid string) ([]byte, error)
PostAction(uuid, action string) error PostAction(uuid, action string) error
//TODO Redirect joblog when we see there's a memory issue. // TODO Redirect joblog when we see there's a memory issue.
} }
// DefaultClient is the default implementation of Client interface // DefaultClient is the default implementation of Client interface
@ -41,7 +41,7 @@ func NewDefaultClient(endpoint, secret string) *DefaultClient {
} }
} }
//SubmitJob calls the jobservice API to submit a job and returns the job's UUID. // SubmitJob calls the jobservice API to submit a job and returns the job's UUID.
func (d *DefaultClient) SubmitJob(jd *models.JobData) (string, error) { func (d *DefaultClient) SubmitJob(jd *models.JobData) (string, error) {
url := d.endpoint + "/api/v1/jobs" url := d.endpoint + "/api/v1/jobs"
jq := models.JobRequest{ jq := models.JobRequest{
@ -78,7 +78,7 @@ func (d *DefaultClient) SubmitJob(jd *models.JobData) (string, error) {
return stats.Stats.JobID, nil return stats.Stats.JobID, nil
} }
//GetJobLog calls the jobservice API to get the log of a job. It only accepts the UUID of the job // GetJobLog calls the jobservice API to get the log of a job. It only accepts the UUID of the job
func (d *DefaultClient) GetJobLog(uuid string) ([]byte, error) { func (d *DefaultClient) GetJobLog(uuid string) ([]byte, error) {
url := d.endpoint + "/api/v1/jobs/" + uuid + "/log" url := d.endpoint + "/api/v1/jobs/" + uuid + "/log"
req, err := http.NewRequest(http.MethodGet, url, nil) req, err := http.NewRequest(http.MethodGet, url, nil)
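
A rough usage sketch of the client above; the endpoint, secret and parameter key are placeholders, and the import paths are assumed from the repository layout.

package main

import (
    "log"

    "github.com/goharbor/harbor/src/common/job"
    "github.com/goharbor/harbor/src/common/job/models"
)

func main() {
    // Endpoint and secret are placeholders; in Harbor they come from configuration.
    client := job.NewDefaultClient("http://jobservice:8080", "secret")

    uuid, err := client.SubmitJob(&models.JobData{
        Name:       job.ImageGC, // one of the job name constants defined above
        Parameters: models.Parameters{"redis_url_reg": "redis://redis:6379"}, // illustrative parameter
    })
    if err != nil {
        log.Fatalf("failed to submit job: %v", err)
    }
    log.Printf("submitted job %s", uuid)
}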

View File

@ -1,7 +1,7 @@
package job package job
const ( const (
//ImageScanJob is the name of the scan job; it is used as the key to register to the job service. // ImageScanJob is the name of the scan job; it is used as the key to register to the job service.
ImageScanJob = "IMAGE_SCAN" ImageScanJob = "IMAGE_SCAN"
// ImageTransfer : the name of image transfer job in job service // ImageTransfer : the name of image transfer job in job service
ImageTransfer = "IMAGE_TRANSFER" ImageTransfer = "IMAGE_TRANSFER"
@ -12,26 +12,26 @@ const (
// ImageGC the name of image garbage collection job in job service // ImageGC the name of image garbage collection job in job service
ImageGC = "IMAGE_GC" ImageGC = "IMAGE_GC"
//JobKindGeneric : Kind of generic job // JobKindGeneric : Kind of generic job
JobKindGeneric = "Generic" JobKindGeneric = "Generic"
//JobKindScheduled : Kind of scheduled job // JobKindScheduled : Kind of scheduled job
JobKindScheduled = "Scheduled" JobKindScheduled = "Scheduled"
//JobKindPeriodic : Kind of periodic job // JobKindPeriodic : Kind of periodic job
JobKindPeriodic = "Periodic" JobKindPeriodic = "Periodic"
//JobServiceStatusPending : job status pending // JobServiceStatusPending : job status pending
JobServiceStatusPending = "Pending" JobServiceStatusPending = "Pending"
//JobServiceStatusRunning : job status running // JobServiceStatusRunning : job status running
JobServiceStatusRunning = "Running" JobServiceStatusRunning = "Running"
//JobServiceStatusStopped : job status stopped // JobServiceStatusStopped : job status stopped
JobServiceStatusStopped = "Stopped" JobServiceStatusStopped = "Stopped"
//JobServiceStatusCancelled : job status cancelled // JobServiceStatusCancelled : job status cancelled
JobServiceStatusCancelled = "Cancelled" JobServiceStatusCancelled = "Cancelled"
//JobServiceStatusError : job status error // JobServiceStatusError : job status error
JobServiceStatusError = "Error" JobServiceStatusError = "Error"
//JobServiceStatusSuccess : job status success // JobServiceStatusSuccess : job status success
JobServiceStatusSuccess = "Success" JobServiceStatusSuccess = "Success"
//JobServiceStatusScheduled : job status scheduled // JobServiceStatusScheduled : job status scheduled
JobServiceStatusScheduled = "Scheduled" JobServiceStatusScheduled = "Scheduled"
// JobActionStop : the action to stop the job // JobActionStop : the action to stop the job

View File

@ -2,15 +2,15 @@
package models package models
//Parameters for job execution. // Parameters for job execution.
type Parameters map[string]interface{} type Parameters map[string]interface{}
//JobRequest is the request of launching a job. // JobRequest is the request of launching a job.
type JobRequest struct { type JobRequest struct {
Job *JobData `json:"job"` Job *JobData `json:"job"`
} }
//JobData keeps the basic info. // JobData keeps the basic info.
type JobData struct { type JobData struct {
Name string `json:"name"` Name string `json:"name"`
Parameters Parameters `json:"parameters"` Parameters Parameters `json:"parameters"`
@ -18,7 +18,7 @@ type JobData struct {
StatusHook string `json:"status_hook"` StatusHook string `json:"status_hook"`
} }
//JobMetadata stores the metadata of job. // JobMetadata stores the metadata of job.
type JobMetadata struct { type JobMetadata struct {
JobKind string `json:"kind"` JobKind string `json:"kind"`
ScheduleDelay uint64 `json:"schedule_delay,omitempty"` ScheduleDelay uint64 `json:"schedule_delay,omitempty"`
@ -26,12 +26,12 @@ type JobMetadata struct {
IsUnique bool `json:"unique"` IsUnique bool `json:"unique"`
} }
//JobStats keeps the result of job launching. // JobStats keeps the result of job launching.
type JobStats struct { type JobStats struct {
Stats *JobStatData `json:"job"` Stats *JobStatData `json:"job"`
} }
//JobStatData keeps the stats of job // JobStatData keeps the stats of job
type JobStatData struct { type JobStatData struct {
JobID string `json:"id"` JobID string `json:"id"`
Status string `json:"status"` Status string `json:"status"`
@ -49,12 +49,12 @@ type JobStatData struct {
HookStatus string `json:"hook_status,omitempty"` HookStatus string `json:"hook_status,omitempty"`
} }
//JobPoolStats represents the health and status of all the running worker pools. // JobPoolStats represents the health and status of all the running worker pools.
type JobPoolStats struct { type JobPoolStats struct {
Pools []*JobPoolStatsData `json:"worker_pools"` Pools []*JobPoolStatsData `json:"worker_pools"`
} }
//JobPoolStatsData represents the health and status of the worker pool. // JobPoolStatsData represents the health and status of the worker pool.
type JobPoolStatsData struct { type JobPoolStatsData struct {
WorkerPoolID string `json:"worker_pool_id"` WorkerPoolID string `json:"worker_pool_id"`
StartedAt int64 `json:"started_at"` StartedAt int64 `json:"started_at"`
@ -64,20 +64,20 @@ type JobPoolStatsData struct {
Status string `json:"status"` Status string `json:"status"`
} }
//JobActionRequest defines the request for triggering a job action like stop/cancel. // JobActionRequest defines the request for triggering a job action like stop/cancel.
type JobActionRequest struct { type JobActionRequest struct {
Action string `json:"action"` Action string `json:"action"`
} }
//JobStatusChange is designed for reporting the status change via hook. // JobStatusChange is designed for reporting the status change via hook.
type JobStatusChange struct { type JobStatusChange struct {
JobID string `json:"job_id"` JobID string `json:"job_id"`
Status string `json:"status"` Status string `json:"status"`
CheckIn string `json:"check_in,omitempty"` CheckIn string `json:"check_in,omitempty"`
} }
//Message is designed for sub/pub messages // Message is designed for sub/pub messages
type Message struct { type Message struct {
Event string Event string
Data interface{} //generic format Data interface{} // generic format
} }

View File

@ -19,7 +19,7 @@ import (
) )
const ( const (
//AdminJobTable is table name for admin job // AdminJobTable is table name for admin job
AdminJobTable = "admin_job" AdminJobTable = "admin_job"
) )
@ -36,7 +36,7 @@ type AdminJob struct {
UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time"` UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time"`
} }
//TableName is required by beego orm to map AdminJob to table AdminJob // TableName is required by beego orm to map AdminJob to table AdminJob
func (a *AdminJob) TableName() string { func (a *AdminJob) TableName() string {
return AdminJobTable return AdminJobTable
} }

View File

@ -29,12 +29,12 @@ type ClairVulnTimestamp struct {
LastUpdateUTC int64 `orm:"-" json:"last_update"` LastUpdateUTC int64 `orm:"-" json:"last_update"`
} }
//TableName is required by beego to map struct to table. // TableName is required by beego to map struct to table.
func (ct *ClairVulnTimestamp) TableName() string { func (ct *ClairVulnTimestamp) TableName() string {
return ClairVulnTimestampTable return ClairVulnTimestampTable
} }
//ClairLayer ... // ClairLayer ...
type ClairLayer struct { type ClairLayer struct {
Name string `json:"Name,omitempty"` Name string `json:"Name,omitempty"`
NamespaceNames []string `json:"NamespaceNames,omitempty"` NamespaceNames []string `json:"NamespaceNames,omitempty"`
@ -45,7 +45,7 @@ type ClairLayer struct {
Features []ClairFeature `json:"Features,omitempty"` Features []ClairFeature `json:"Features,omitempty"`
} }
//ClairFeature ... // ClairFeature ...
type ClairFeature struct { type ClairFeature struct {
Name string `json:"Name,omitempty"` Name string `json:"Name,omitempty"`
NamespaceName string `json:"NamespaceName,omitempty"` NamespaceName string `json:"NamespaceName,omitempty"`
@ -55,7 +55,7 @@ type ClairFeature struct {
AddedBy string `json:"AddedBy,omitempty"` AddedBy string `json:"AddedBy,omitempty"`
} }
//ClairVulnerability ... // ClairVulnerability ...
type ClairVulnerability struct { type ClairVulnerability struct {
Name string `json:"Name,omitempty"` Name string `json:"Name,omitempty"`
NamespaceName string `json:"NamespaceName,omitempty"` NamespaceName string `json:"NamespaceName,omitempty"`
@ -67,18 +67,18 @@ type ClairVulnerability struct {
FixedIn []ClairFeature `json:"FixedIn,omitempty"` FixedIn []ClairFeature `json:"FixedIn,omitempty"`
} }
//ClairError ... // ClairError ...
type ClairError struct { type ClairError struct {
Message string `json:"Message,omitempty"` Message string `json:"Message,omitempty"`
} }
//ClairLayerEnvelope ... // ClairLayerEnvelope ...
type ClairLayerEnvelope struct { type ClairLayerEnvelope struct {
Layer *ClairLayer `json:"Layer,omitempty"` Layer *ClairLayer `json:"Layer,omitempty"`
Error *ClairError `json:"Error,omitempty"` Error *ClairError `json:"Error,omitempty"`
} }
//ClairNotification ... // ClairNotification ...
type ClairNotification struct { type ClairNotification struct {
Name string `json:"Name,omitempty"` Name string `json:"Name,omitempty"`
Created string `json:"Created,omitempty"` Created string `json:"Created,omitempty"`
@ -91,45 +91,45 @@ type ClairNotification struct {
New *ClairVulnerabilityWithLayers `json:"New,omitempty"` New *ClairVulnerabilityWithLayers `json:"New,omitempty"`
} }
//ClairNotificationEnvelope ... // ClairNotificationEnvelope ...
type ClairNotificationEnvelope struct { type ClairNotificationEnvelope struct {
Notification *ClairNotification `json:"Notification,omitempty"` Notification *ClairNotification `json:"Notification,omitempty"`
Error *ClairError `json:"Error,omitempty"` Error *ClairError `json:"Error,omitempty"`
} }
//ClairVulnerabilityWithLayers ... // ClairVulnerabilityWithLayers ...
type ClairVulnerabilityWithLayers struct { type ClairVulnerabilityWithLayers struct {
Vulnerability *ClairVulnerability `json:"Vulnerability,omitempty"` Vulnerability *ClairVulnerability `json:"Vulnerability,omitempty"`
OrderedLayersIntroducingVulnerability []ClairOrderedLayerName `json:"OrderedLayersIntroducingVulnerability,omitempty"` OrderedLayersIntroducingVulnerability []ClairOrderedLayerName `json:"OrderedLayersIntroducingVulnerability,omitempty"`
} }
//ClairOrderedLayerName ... // ClairOrderedLayerName ...
type ClairOrderedLayerName struct { type ClairOrderedLayerName struct {
Index int `json:"Index"` Index int `json:"Index"`
LayerName string `json:"LayerName"` LayerName string `json:"LayerName"`
} }
//ClairVulnerabilityStatus reflects the readiness and freshness of vulnerability data in Clair, // ClairVulnerabilityStatus reflects the readiness and freshness of vulnerability data in Clair,
//which will be returned in the response of the systeminfo API. // which will be returned in the response of the systeminfo API.
type ClairVulnerabilityStatus struct { type ClairVulnerabilityStatus struct {
OverallUTC int64 `json:"overall_last_update,omitempty"` OverallUTC int64 `json:"overall_last_update,omitempty"`
Details []ClairNamespaceTimestamp `json:"details,omitempty"` Details []ClairNamespaceTimestamp `json:"details,omitempty"`
} }
//ClairNamespaceTimestamp is a record to store the Clair namespace and the timestamp, // ClairNamespaceTimestamp is a record to store the Clair namespace and the timestamp,
//in practice different namespaces in Clair may be merged into one, e.g. ubuntu:14.04 and ubuntu:16.04 may be merged into ubuntu and put into the response. // in practice different namespaces in Clair may be merged into one, e.g. ubuntu:14.04 and ubuntu:16.04 may be merged into ubuntu and put into the response.
type ClairNamespaceTimestamp struct { type ClairNamespaceTimestamp struct {
Namespace string `json:"namespace"` Namespace string `json:"namespace"`
Timestamp int64 `json:"last_update"` Timestamp int64 `json:"last_update"`
} }
//ClairNamespace ... // ClairNamespace ...
type ClairNamespace struct { type ClairNamespace struct {
Name string `json:"Name,omitempty"` Name string `json:"Name,omitempty"`
VersionFormat string `json:"VersionFormat,omitempty"` VersionFormat string `json:"VersionFormat,omitempty"`
} }
//ClairNamespaceEnvelope ... // ClairNamespaceEnvelope ...
type ClairNamespaceEnvelope struct { type ClairNamespaceEnvelope struct {
Namespaces *[]ClairNamespace `json:"Namespaces,omitempty"` Namespaces *[]ClairNamespace `json:"Namespaces,omitempty"`
Error *ClairError `json:"Error,omitempty"` Error *ClairError `json:"Error,omitempty"`

View File

@ -15,21 +15,21 @@
package models package models
const ( const (
//JobPending ... // JobPending ...
JobPending string = "pending" JobPending string = "pending"
//JobRunning ... // JobRunning ...
JobRunning string = "running" JobRunning string = "running"
//JobError ... // JobError ...
JobError string = "error" JobError string = "error"
//JobStopped ... // JobStopped ...
JobStopped string = "stopped" JobStopped string = "stopped"
//JobFinished ... // JobFinished ...
JobFinished string = "finished" JobFinished string = "finished"
//JobCanceled ... // JobCanceled ...
JobCanceled string = "canceled" JobCanceled string = "canceled"
//JobRetrying indicates the job needs to be retried; it will be scheduled to the end of the job queue by the state machine after an interval. // JobRetrying indicates the job needs to be retried; it will be scheduled to the end of the job queue by the state machine after an interval.
JobRetrying string = "retrying" JobRetrying string = "retrying"
//JobContinue is the status returned by the state handler to tell the state machine to move to the next possible state based on the transition table. // JobContinue is the status returned by the state handler to tell the state machine to move to the next possible state based on the transition table.
JobContinue string = "_continue" JobContinue string = "_continue"
// JobScheduled ... // JobScheduled ...
JobScheduled string = "scheduled" JobScheduled string = "scheduled"

View File

@ -45,7 +45,7 @@ type LdapUser struct {
GroupDNList []string `json:"ldap_groupdn"` GroupDNList []string `json:"ldap_groupdn"`
} }
//LdapImportUser ... // LdapImportUser ...
type LdapImportUser struct { type LdapImportUser struct {
LdapUIDList []string `json:"ldap_uid_list"` LdapUIDList []string `json:"ldap_uid_list"`
} }

View File

@ -22,7 +22,7 @@ import (
const ( const (
ProMetaPublic = "public" ProMetaPublic = "public"
ProMetaEnableContentTrust = "enable_content_trust" ProMetaEnableContentTrust = "enable_content_trust"
ProMetaPreventVul = "prevent_vul" //prevent vulnerable images from being pulled ProMetaPreventVul = "prevent_vul" // prevent vulnerable images from being pulled
ProMetaSeverity = "severity" ProMetaSeverity = "severity"
ProMetaAutoScan = "auto_scan" ProMetaAutoScan = "auto_scan"
SeverityNone = "negligible" SeverityNone = "negligible"

View File

@ -154,7 +154,7 @@ type BaseProjectCollection struct {
// ProjectRequest holds the information needed by the project creation API // ProjectRequest holds the information needed by the project creation API
type ProjectRequest struct { type ProjectRequest struct {
Name string `json:"project_name"` Name string `json:"project_name"`
Public *int `json:"public"` //deprecated, reserved for project creation in replication Public *int `json:"public"` // deprecated, reserved for project creation in replication
Metadata map[string]string `json:"metadata"` Metadata map[string]string `json:"metadata"`
} }
@ -164,7 +164,7 @@ type ProjectQueryResult struct {
Projects []*Project Projects []*Project
} }
//TableName is required by beego orm to map Project to table project // TableName is required by beego orm to map Project to table project
func (p *Project) TableName() string { func (p *Project) TableName() string {
return ProjectTable return ProjectTable
} }

View File

@ -22,17 +22,17 @@ import (
) )
const ( const (
//RepOpTransfer represents the operation of a job to transfer repository to a remote registry/harbor instance. // RepOpTransfer represents the operation of a job to transfer repository to a remote registry/harbor instance.
RepOpTransfer string = "transfer" RepOpTransfer string = "transfer"
//RepOpDelete represents the operation of a job to remove repository from a remote registry/harbor instance. // RepOpDelete represents the operation of a job to remove repository from a remote registry/harbor instance.
RepOpDelete string = "delete" RepOpDelete string = "delete"
//RepOpSchedule represents the operation of a job to schedule the real replication process // RepOpSchedule represents the operation of a job to schedule the real replication process
RepOpSchedule string = "schedule" RepOpSchedule string = "schedule"
//RepTargetTable is the table name for replication targets // RepTargetTable is the table name for replication targets
RepTargetTable = "replication_target" RepTargetTable = "replication_target"
//RepJobTable is the table name for replication jobs // RepJobTable is the table name for replication jobs
RepJobTable = "replication_job" RepJobTable = "replication_job"
//RepPolicyTable is table name for replication policies // RepPolicyTable is table name for replication policies
RepPolicyTable = "replication_policy" RepPolicyTable = "replication_policy"
) )
@ -108,17 +108,17 @@ func (r *RepTarget) Valid(v *validation.Validation) {
} }
} }
//TableName is required by beego orm to map RepTarget to table replication_target // TableName is required by beego orm to map RepTarget to table replication_target
func (r *RepTarget) TableName() string { func (r *RepTarget) TableName() string {
return RepTargetTable return RepTargetTable
} }
//TableName is required by beego orm to map RepJob to table replication_job // TableName is required by beego orm to map RepJob to table replication_job
func (r *RepJob) TableName() string { func (r *RepJob) TableName() string {
return RepJobTable return RepJobTable
} }
//TableName is required by beego orm to map RepPolicy to table replication_policy // TableName is required by beego orm to map RepPolicy to table replication_policy
func (r *RepPolicy) TableName() string { func (r *RepPolicy) TableName() string {
return RepPolicyTable return RepPolicyTable
} }

View File

@ -18,7 +18,7 @@ import (
"time" "time"
) )
//RepoTable is the table name for repository // RepoTable is the table name for repository
const RepoTable = "repository" const RepoTable = "repository"
// RepoRecord holds the record of a repository in DB; all the info comes from the registry notification event. // RepoRecord holds the record of a repository in DB; all the info comes from the registry notification event.
@ -33,7 +33,7 @@ type RepoRecord struct {
UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time"` UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time"`
} }
//TableName is required by beego orm to map RepoRecord to table repository // TableName is required by beego orm to map RepoRecord to table repository
func (rp *RepoRecord) TableName() string { func (rp *RepoRecord) TableName() string {
return RepoTable return RepoTable
} }

View File

@ -15,11 +15,11 @@
package models package models
const ( const (
//PROJECTADMIN project administrator // PROJECTADMIN project administrator
PROJECTADMIN = 1 PROJECTADMIN = 1
//DEVELOPER developer // DEVELOPER developer
DEVELOPER = 2 DEVELOPER = 2
//GUEST guest // GUEST guest
GUEST = 3 GUEST = 3
) )

View File

@ -16,13 +16,13 @@ package models
import "time" import "time"
//ScanJobTable is the name of the table whose data is mapped by ScanJob struct. // ScanJobTable is the name of the table whose data is mapped by ScanJob struct.
const ScanJobTable = "img_scan_job" const ScanJobTable = "img_scan_job"
//ScanOverviewTable is the name of the table whose data is mapped by ImgScanOverview struct. // ScanOverviewTable is the name of the table whose data is mapped by ImgScanOverview struct.
const ScanOverviewTable = "img_scan_overview" const ScanOverviewTable = "img_scan_overview"
//ScanJob is the model to represent a job for image scan in DB. // ScanJob is the model to represent a job for image scan in DB.
type ScanJob struct { type ScanJob struct {
ID int64 `orm:"pk;auto;column(id)" json:"id"` ID int64 `orm:"pk;auto;column(id)" json:"id"`
Status string `orm:"column(status)" json:"status"` Status string `orm:"column(status)" json:"status"`
@ -47,7 +47,7 @@ const (
SevHigh SevHigh
) )
//String is the output function for the severity variable // String is the output function for the severity variable
func (sev Severity) String() string { func (sev Severity) String() string {
name := []string{"negligible", "unknown", "low", "medium", "high"} name := []string{"negligible", "unknown", "low", "medium", "high"}
i := int64(sev) i := int64(sev)
@ -59,12 +59,12 @@ func (sev Severity) String() string {
} }
} }
//TableName is required by beego orm to map ScanJob to table img_scan_job // TableName is required by beego orm to map ScanJob to table img_scan_job
func (s *ScanJob) TableName() string { func (s *ScanJob) TableName() string {
return ScanJobTable return ScanJobTable
} }
//ImgScanOverview is mapped to a record of image scan overview. // ImgScanOverview is mapped to a record of image scan overview.
type ImgScanOverview struct { type ImgScanOverview struct {
ID int64 `orm:"pk;auto;column(id)" json:"-"` ID int64 `orm:"pk;auto;column(id)" json:"-"`
Digest string `orm:"column(image_digest)" json:"image_digest"` Digest string `orm:"column(image_digest)" json:"image_digest"`
@ -78,18 +78,18 @@ type ImgScanOverview struct {
UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time,omitempty"` UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time,omitempty"`
} }
//TableName ... // TableName ...
func (iso *ImgScanOverview) TableName() string { func (iso *ImgScanOverview) TableName() string {
return ScanOverviewTable return ScanOverviewTable
} }
//ComponentsOverview has the total number and a list of component counts per severity level. // ComponentsOverview has the total number and a list of component counts per severity level.
type ComponentsOverview struct { type ComponentsOverview struct {
Total int `json:"total"` Total int `json:"total"`
Summary []*ComponentsOverviewEntry `json:"summary"` Summary []*ComponentsOverviewEntry `json:"summary"`
} }
//ComponentsOverviewEntry ... // ComponentsOverviewEntry ...
type ComponentsOverviewEntry struct { type ComponentsOverviewEntry struct {
Sev int `json:"severity"` Sev int `json:"severity"`
Count int `json:"count"` Count int `json:"count"`
@ -129,7 +129,7 @@ const (
ScanAllDailyTime = "daily_time" ScanAllDailyTime = "daily_time"
) )
//DefaultScanAllPolicy ... // DefaultScanAllPolicy ...
var DefaultScanAllPolicy = ScanAllPolicy{ var DefaultScanAllPolicy = ScanAllPolicy{
Type: ScanAllDaily, Type: ScanAllDaily,
Parm: map[string]interface{}{ Parm: map[string]interface{}{
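
For context, here is a minimal consumer-side sketch of the scan models touched above. It assumes the models package import path and that the severity constants (such as SevHigh) are typed Severity values; both are assumptions based on the snippets in this diff, not part of the commit.

package main

import (
    "fmt"

    "github.com/goharbor/harbor/src/common/models" // assumed import path
)

func main() {
    // Severity satisfies fmt.Stringer through its String method,
    // so it prints as a readable level name such as "high".
    fmt.Println(models.SevHigh)

    // ScanJob is persisted by beego orm into the table named by ScanJobTable.
    job := models.ScanJob{Status: "pending"}
    fmt.Println(job.TableName(), job.Status) // img_scan_job pending
}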

View File

@ -14,7 +14,7 @@
package models package models
//UAASettings wraps the configurations to access UAA service // UAASettings wraps the configurations to access UAA service
type UAASettings struct { type UAASettings struct {
Endpoint string Endpoint string
ClientID string ClientID string

View File

@ -31,8 +31,8 @@ type User struct {
Comment string `orm:"column(comment)" json:"comment"` Comment string `orm:"column(comment)" json:"comment"`
Deleted bool `orm:"column(deleted)" json:"deleted"` Deleted bool `orm:"column(deleted)" json:"deleted"`
Rolename string `orm:"-" json:"role_name"` Rolename string `orm:"-" json:"role_name"`
//if this field is named as "RoleID", beego orm can not map role_id // if this field is named as "RoleID", beego orm can not map role_id
//to it. // to it.
Role int `orm:"-" json:"role_id"` Role int `orm:"-" json:"role_id"`
// RoleList []Role `json:"role_list"` // RoleList []Role `json:"role_list"`
HasAdminRole bool `orm:"column(sysadmin_flag)" json:"has_admin_role"` HasAdminRole bool `orm:"column(sysadmin_flag)" json:"has_admin_role"`

View File

@ -29,7 +29,7 @@ type WatchItem struct {
UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time"` UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time"`
} }
//TableName ... // TableName ...
func (w *WatchItem) TableName() string { func (w *WatchItem) TableName() string {
return "replication_immediate_trigger" return "replication_immediate_trigger"
} }

View File

@ -7,13 +7,13 @@ import (
"github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/common/utils"
) )
//WatchConfigChanges is used to watch the configuration changes. // WatchConfigChanges is used to watch the configuration changes.
func WatchConfigChanges(cfg map[string]interface{}) error { func WatchConfigChanges(cfg map[string]interface{}) error {
if cfg == nil { if cfg == nil {
return errors.New("Empty configurations") return errors.New("Empty configurations")
} }
//Currently only watch the scan all policy change. // Currently only watch the scan all policy change.
if v, ok := cfg[ScanAllPolicyTopic]; ok { if v, ok := cfg[ScanAllPolicyTopic]; ok {
policyCfg := &models.ScanAllPolicy{} policyCfg := &models.ScanAllPolicy{}
if err := utils.ConvertMapToStruct(policyCfg, v); err != nil { if err := utils.ConvertMapToStruct(policyCfg, v); err != nil {

View File

@ -1,15 +1,15 @@
package notifier package notifier
//NotificationHandler defines what operations a notification handler // NotificationHandler defines what operations a notification handler
//should have. // should have.
type NotificationHandler interface { type NotificationHandler interface {
//Handle the event when it comes. // Handle the event when it comes.
//value might be optional; it depends on the usage. // value might be optional; it depends on the usage.
Handle(value interface{}) error Handle(value interface{}) error
//IsStateful returns whether the handler is stateful or not. // IsStateful returns whether the handler is stateful or not.
//If the handler is stateful, it will not be triggered in parallel. // If the handler is stateful, it will not be triggered in parallel.
//Otherwise, the handler will be triggered concurrently if more // Otherwise, the handler will be triggered concurrently if more
//than one handler of the same type matches the topics. // than one handler of the same type matches the topics.
IsStateful() bool IsStateful() bool
} }
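
As a quick illustration of this contract, a hypothetical handler might look like the sketch below; the logHandler type and its behavior are invented for this example and are not part of the commit.

package notifier

import (
    "errors"

    "github.com/goharbor/harbor/src/common/utils/log"
)

// logHandler simply logs whatever value arrives.
type logHandler struct{}

// Handle rejects nil values and logs everything else.
func (h *logHandler) Handle(value interface{}) error {
    if value == nil {
        return errors.New("nil value is not handled")
    }
    log.Infof("notification received: %#v", value)
    return nil
}

// IsStateful returns false, so the watcher may trigger this handler concurrently.
func (h *logHandler) IsStateful() bool {
    return false
}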

View File

@ -10,52 +10,52 @@ import (
"github.com/goharbor/harbor/src/common/utils/log" "github.com/goharbor/harbor/src/common/utils/log"
) )
//HandlerIndexer sets up the relationship between the handler type and // HandlerIndexer sets up the relationship between the handler type and
//the handler instance. // the handler instance.
type HandlerIndexer map[string]NotificationHandler type HandlerIndexer map[string]NotificationHandler
//Notification wraps the topic and the related data value, if any. // Notification wraps the topic and the related data value, if any.
type Notification struct { type Notification struct {
//Topic of notification // Topic of notification
//Required // Required
Topic string Topic string
//Value of notification. // Value of notification.
//Optional // Optional
Value interface{} Value interface{}
} }
//HandlerChannel provides not only the chan itself but also the count of // HandlerChannel provides not only the chan itself but also the count of
//handlers related with this chan. // handlers related with this chan.
type HandlerChannel struct { type HandlerChannel struct {
//To indicate how many handler instances bound with this chan. // To indicate how many handler instances bound with this chan.
boundCount uint32 boundCount uint32
//The chan for controlling concurrent executions. // The chan for controlling concurrent executions.
channel chan bool channel chan bool
} }
//NotificationWatcher is defined to accept the events published // NotificationWatcher is defined to accept the events published
//by the sender, match them with the pre-registered notification handlers, // by the sender, match them with the pre-registered notification handlers,
//and then trigger the execution of the found handlers. // and then trigger the execution of the found handlers.
type NotificationWatcher struct { type NotificationWatcher struct {
//For handling concurrent scenarios. // For handling concurrent scenarios.
*sync.RWMutex *sync.RWMutex
//To keep the registered handlers in memory. // To keep the registered handlers in memory.
//Each topic can register multiple handlers. // Each topic can register multiple handlers.
//Each handler can bind to multiple topics. // Each handler can bind to multiple topics.
handlers map[string]HandlerIndexer handlers map[string]HandlerIndexer
//Keep the channels which are used to control the concurrent executions // Keep the channels which are used to control the concurrent executions
//of multiple stateful handlers with same type. // of multiple stateful handlers with same type.
handlerChannels map[string]*HandlerChannel handlerChannels map[string]*HandlerChannel
} }
//notificationWatcher is a default notification watcher at the package level. // notificationWatcher is a default notification watcher at the package level.
var notificationWatcher = NewNotificationWatcher() var notificationWatcher = NewNotificationWatcher()
//NewNotificationWatcher is constructor of NotificationWatcher. // NewNotificationWatcher is constructor of NotificationWatcher.
func NewNotificationWatcher() *NotificationWatcher { func NewNotificationWatcher() *NotificationWatcher {
return &NotificationWatcher{ return &NotificationWatcher{
new(sync.RWMutex), new(sync.RWMutex),
@ -64,7 +64,7 @@ func NewNotificationWatcher() *NotificationWatcher {
} }
} }
//Handle the related topic with the specified handler. // Handle the related topic with the specified handler.
func (nw *NotificationWatcher) Handle(topic string, handler NotificationHandler) error { func (nw *NotificationWatcher) Handle(topic string, handler NotificationHandler) error {
if strings.TrimSpace(topic) == "" { if strings.TrimSpace(topic) == "" {
return errors.New("Empty topic is not supported") return errors.New("Empty topic is not supported")
@ -91,11 +91,11 @@ func (nw *NotificationWatcher) Handle(topic string, handler NotificationHandler)
} }
if handler.IsStateful() { if handler.IsStateful() {
//First time // First time
if handlerChan, ok := nw.handlerChannels[t]; !ok { if handlerChan, ok := nw.handlerChannels[t]; !ok {
nw.handlerChannels[t] = &HandlerChannel{1, make(chan bool, 1)} nw.handlerChannels[t] = &HandlerChannel{1, make(chan bool, 1)}
} else { } else {
//Already have chan, just increase count // Already have chan, just increase count
handlerChan.boundCount++ handlerChan.boundCount++
} }
} }
@ -103,9 +103,9 @@ func (nw *NotificationWatcher) Handle(topic string, handler NotificationHandler)
return nil return nil
} }
//UnHandle is to revoke the registered handler with the specified topic. // UnHandle is to revoke the registered handler with the specified topic.
//'handler' is optional, the type name of the handler. If it's empty value, // 'handler' is optional, the type name of the handler. If it's empty value,
//then revoke the whole topic, otherwise only revoke the specified handler. // then revoke the whole topic, otherwise only revoke the specified handler.
func (nw *NotificationWatcher) UnHandle(topic string, handler string) error { func (nw *NotificationWatcher) UnHandle(topic string, handler string) error {
if strings.TrimSpace(topic) == "" { if strings.TrimSpace(topic) == "" {
return errors.New("Empty topic is not supported") return errors.New("Empty topic is not supported")
@ -115,20 +115,20 @@ func (nw *NotificationWatcher) UnHandle(topic string, handler string) error {
nw.Lock() nw.Lock()
var revokeHandler = func(indexer HandlerIndexer, handlerType string) bool { var revokeHandler = func(indexer HandlerIndexer, handlerType string) bool {
//Find the specified one // Find the specified one
if hd, existing := indexer[handlerType]; existing { if hd, existing := indexer[handlerType]; existing {
delete(indexer, handlerType) delete(indexer, handlerType)
if len(indexer) == 0 { if len(indexer) == 0 {
//No handler existing, then remove topic // No handler existing, then remove topic
delete(nw.handlers, topic) delete(nw.handlers, topic)
} }
//Update channel counter or remove channel // Update channel counter or remove channel
if hd.IsStateful() { if hd.IsStateful() {
if theChan, yes := nw.handlerChannels[handlerType]; yes { if theChan, yes := nw.handlerChannels[handlerType]; yes {
theChan.boundCount-- theChan.boundCount--
if theChan.boundCount == 0 { if theChan.boundCount == 0 {
//Empty, then remove the channel // Empty, then remove the channel
delete(nw.handlerChannels, handlerType) delete(nw.handlerChannels, handlerType)
} }
} }
@ -149,7 +149,7 @@ func (nw *NotificationWatcher) UnHandle(topic string, handler string) error {
return nil return nil
} }
//Revoke the specified handler. // Revoke the specified handler.
if revokeHandler(indexer, handler) { if revokeHandler(indexer, handler) {
return nil return nil
} }
@ -158,7 +158,7 @@ func (nw *NotificationWatcher) UnHandle(topic string, handler string) error {
return fmt.Errorf("Failed to revoke handler %s with topic %s", handler, topic) return fmt.Errorf("Failed to revoke handler %s with topic %s", handler, topic)
} }
//Notify that notification is coming. // Notify that notification is coming.
func (nw *NotificationWatcher) Notify(notification Notification) error { func (nw *NotificationWatcher) Notify(notification Notification) error {
if strings.TrimSpace(notification.Topic) == "" { if strings.TrimSpace(notification.Topic) == "" {
return errors.New("Empty topic can not be notified") return errors.New("Empty topic can not be notified")
@ -180,7 +180,7 @@ func (nw *NotificationWatcher) Notify(notification Notification) error {
handlers = append(handlers, h) handlers = append(handlers, h)
} }
//Trigger handlers // Trigger handlers
for _, h := range handlers { for _, h := range handlers {
var handlerChan chan bool var handlerChan chan bool
if h.IsStateful() { if h.IsStateful() {
@ -198,7 +198,7 @@ func (nw *NotificationWatcher) Notify(notification Notification) error {
} }
}() }()
if err := hd.Handle(notification.Value); err != nil { if err := hd.Handle(notification.Value); err != nil {
//Currently, we just log the error // Currently, we just log the error
log.Errorf("Error occurred when triggering handler %s of topic %s: %s\n", reflect.TypeOf(hd).String(), notification.Topic, err.Error()) log.Errorf("Error occurred when triggering handler %s of topic %s: %s\n", reflect.TypeOf(hd).String(), notification.Topic, err.Error())
} else { } else {
log.Infof("Handle notification with topic '%s': %#v\n", notification.Topic, notification.Value) log.Infof("Handle notification with topic '%s': %#v\n", notification.Topic, notification.Value)
@ -210,17 +210,17 @@ func (nw *NotificationWatcher) Notify(notification Notification) error {
return nil return nil
} }
//Subscribe is a wrapper utility method for NotificationWatcher.Handle() // Subscribe is a wrapper utility method for NotificationWatcher.Handle()
func Subscribe(topic string, handler NotificationHandler) error { func Subscribe(topic string, handler NotificationHandler) error {
return notificationWatcher.Handle(topic, handler) return notificationWatcher.Handle(topic, handler)
} }
//UnSubscribe is a wrapper utility method for NotificationWatcher.UnHandle() // UnSubscribe is a wrapper utility method for NotificationWatcher.UnHandle()
func UnSubscribe(topic string, handler string) error { func UnSubscribe(topic string, handler string) error {
return notificationWatcher.UnHandle(topic, handler) return notificationWatcher.UnHandle(topic, handler)
} }
//Publish is a wrapper utility method for NotificationWatcher.Notify() // Publish is a wrapper utility method for NotificationWatcher.Notify()
func Publish(topic string, value interface{}) error { func Publish(topic string, value interface{}) error {
return notificationWatcher.Notify(Notification{ return notificationWatcher.Notify(Notification{
Topic: topic, Topic: topic,
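
To tie the pieces together, a usage sketch of the package-level wrappers could look like the program below; the notifier import path and the countHandler type are assumptions made for illustration, mirroring the style of the package tests.

package main

import (
    "fmt"
    "sync/atomic"
    "time"

    "github.com/goharbor/harbor/src/common/notifier" // assumed import path
)

var hits int32

// countHandler counts how many notifications it has seen.
type countHandler struct{}

func (c *countHandler) Handle(value interface{}) error {
    atomic.AddInt32(&hits, 1)
    return nil
}

// Stateful handlers of the same type are executed one by one.
func (c *countHandler) IsStateful() bool { return true }

func main() {
    if err := notifier.Subscribe("demo_topic", &countHandler{}); err != nil {
        panic(err)
    }
    for i := 0; i < 3; i++ {
        if err := notifier.Publish("demo_topic", i); err != nil {
            panic(err)
        }
    }
    // Handlers run asynchronously, so give them a moment before reading the counter.
    <-time.After(1 * time.Second)
    fmt.Println("handled:", atomic.LoadInt32(&hits))

    // An empty handler type name revokes the whole topic.
    if err := notifier.UnSubscribe("demo_topic", ""); err != nil {
        panic(err)
    }
}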

View File

@ -128,7 +128,7 @@ func TestPublish(t *testing.T) {
Publish("topic1", 100) Publish("topic1", 100)
Publish("topic2", 50) Publish("topic2", 50)
//Waiting for the async handling to be done // Waiting for the async handling to be done
<-time.After(1 * time.Second) <-time.After(1 * time.Second)
finalData := atomic.LoadInt32(&statefulData) finalData := atomic.LoadInt32(&statefulData)
@ -146,7 +146,7 @@ func TestPublish(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
//Clear stateful data. // Clear stateful data.
atomic.StoreInt32(&statefulData, 0) atomic.StoreInt32(&statefulData, 0)
} }
@ -161,12 +161,12 @@ func TestConcurrentPublish(t *testing.T) {
t.Fail() t.Fail()
} }
//Publish in a short interval. // Publish in a short interval.
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
Publish("topic1", 100) Publish("topic1", 100)
} }
//Waiting for the async handling to be done // Waiting for the async handling to be done
<-time.After(1 * time.Second) <-time.After(1 * time.Second)
finalData := atomic.LoadInt32(&statefulData) finalData := atomic.LoadInt32(&statefulData)
@ -179,7 +179,7 @@ func TestConcurrentPublish(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
//Clear stateful data. // Clear stateful data.
atomic.StoreInt32(&statefulData, 0) atomic.StoreInt32(&statefulData, 0)
} }
@ -206,7 +206,7 @@ func TestConcurrentPublishWithScanPolicyHandler(t *testing.T) {
} }
} }
//Waiting for everything to be ready. // Waiting for everything to be ready.
<-time.After(2 * time.Second) <-time.After(2 * time.Second)
if err := UnSubscribe("testing_topic", ""); err != nil { if err := UnSubscribe("testing_topic", ""); err != nil {
@ -218,7 +218,7 @@ func TestConcurrentPublishWithScanPolicyHandler(t *testing.T) {
} }
scheduler.DefaultScheduler.Stop() scheduler.DefaultScheduler.Stop()
//Waiting for everything to be ready. // Waiting for everything to be ready.
<-time.After(1 * time.Second) <-time.After(1 * time.Second)
if scheduler.DefaultScheduler.IsRunning() { if scheduler.DefaultScheduler.IsRunning() {
t.Fatal("Policy scheduler is not stopped") t.Fatal("Policy scheduler is not stopped")

View File

@ -13,35 +13,35 @@ import (
) )
const ( const (
//PolicyTypeDaily specifies that the policy type is "daily" // PolicyTypeDaily specifies that the policy type is "daily"
PolicyTypeDaily = "daily" PolicyTypeDaily = "daily"
//PolicyTypeNone specifies that the policy type is "none" // PolicyTypeNone specifies that the policy type is "none"
PolicyTypeNone = "none" PolicyTypeNone = "none"
alternatePolicy = "Alternate Policy" alternatePolicy = "Alternate Policy"
) )
//ScanPolicyNotification is defined for passing the policy change data. // ScanPolicyNotification is defined for passing the policy change data.
type ScanPolicyNotification struct { type ScanPolicyNotification struct {
//Type is used to keep the scan policy type: "none","daily" and "refresh". // Type is used to keep the scan policy type: "none","daily" and "refresh".
Type string Type string
//DailyTime is used when the type is 'daily'; it is the offset in seconds from UTC time 00:00. // DailyTime is used when the type is 'daily'; it is the offset in seconds from UTC time 00:00.
DailyTime int64 DailyTime int64
} }
//ScanPolicyNotificationHandler is defined to handle the changes of scanning // ScanPolicyNotificationHandler is defined to handle the changes of scanning
//policy. // policy.
type ScanPolicyNotificationHandler struct{} type ScanPolicyNotificationHandler struct{}
//IsStateful to indicate this handler is stateful. // IsStateful to indicate this handler is stateful.
func (s *ScanPolicyNotificationHandler) IsStateful() bool { func (s *ScanPolicyNotificationHandler) IsStateful() bool {
//Policy change should be done one by one. // Policy change should be done one by one.
return true return true
} }
//Handle the policy change notification. // Handle the policy change notification.
func (s *ScanPolicyNotificationHandler) Handle(value interface{}) error { func (s *ScanPolicyNotificationHandler) Handle(value interface{}) error {
if value == nil { if value == nil {
return errors.New("ScanPolicyNotificationHandler can not handle nil value") return errors.New("ScanPolicyNotificationHandler can not handle nil value")
@ -57,27 +57,27 @@ func (s *ScanPolicyNotificationHandler) Handle(value interface{}) error {
hasScheduled := scheduler.DefaultScheduler.HasScheduled(alternatePolicy) hasScheduled := scheduler.DefaultScheduler.HasScheduled(alternatePolicy)
if notification.Type == PolicyTypeDaily { if notification.Type == PolicyTypeDaily {
if !hasScheduled { if !hasScheduled {
//Schedule a new policy. // Schedule a new policy.
return schedulePolicy(notification) return schedulePolicy(notification)
} }
//To check and compare if the related parameter is changed. // To check and compare if the related parameter is changed.
if pl := scheduler.DefaultScheduler.GetPolicy(alternatePolicy); pl != nil { if pl := scheduler.DefaultScheduler.GetPolicy(alternatePolicy); pl != nil {
policyCandidate := policy.NewAlternatePolicy(alternatePolicy, &policy.AlternatePolicyConfiguration{ policyCandidate := policy.NewAlternatePolicy(alternatePolicy, &policy.AlternatePolicyConfiguration{
Duration: 24 * time.Hour, Duration: 24 * time.Hour,
OffsetTime: notification.DailyTime, OffsetTime: notification.DailyTime,
}) })
if !pl.Equal(policyCandidate) { if !pl.Equal(policyCandidate) {
//Parameter changed. // Parameter changed.
//Unschedule policy. // Unschedule policy.
if err := scheduler.DefaultScheduler.UnSchedule(alternatePolicy); err != nil { if err := scheduler.DefaultScheduler.UnSchedule(alternatePolicy); err != nil {
return err return err
} }
//Schedule a new policy. // Schedule a new policy.
return schedulePolicy(notification) return schedulePolicy(notification)
} }
//Same policy configuration, do nothing // Same policy configuration, do nothing
return nil return nil
} }
@ -93,7 +93,7 @@ func (s *ScanPolicyNotificationHandler) Handle(value interface{}) error {
return nil return nil
} }
//Schedule policy. // Schedule policy.
func schedulePolicy(notification ScanPolicyNotification) error { func schedulePolicy(notification ScanPolicyNotification) error {
schedulePolicy := policy.NewAlternatePolicy(alternatePolicy, &policy.AlternatePolicyConfiguration{ schedulePolicy := policy.NewAlternatePolicy(alternatePolicy, &policy.AlternatePolicyConfiguration{
Duration: 24 * time.Hour, Duration: 24 * time.Hour,
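
A hedged sketch of driving this handler directly is shown below. applyDailyScanPolicy is a hypothetical helper added only for illustration; in production the notification would arrive through the notifier rather than a direct call, and the types are assumed to live in the notifier package as the surrounding files suggest.

package notifier

import "github.com/goharbor/harbor/src/common/utils/log"

// applyDailyScanPolicy feeds a "daily" policy change to the handler.
// A DailyTime of 7200 means the attached scan tasks run every 24 hours,
// starting 7200 seconds after 00:00 UTC, i.e. at 02:00 UTC.
func applyDailyScanPolicy(offsetSeconds int64) {
    handler := &ScanPolicyNotificationHandler{}
    n := ScanPolicyNotification{Type: PolicyTypeDaily, DailyTime: offsetSeconds}
    if err := handler.Handle(n); err != nil {
        log.Errorf("failed to apply scan policy: %v", err)
    }
}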

View File

@ -11,7 +11,7 @@ import (
var testingScheduler = scheduler.DefaultScheduler var testingScheduler = scheduler.DefaultScheduler
func TestScanPolicyNotificationHandler(t *testing.T) { func TestScanPolicyNotificationHandler(t *testing.T) {
//Scheduler should be running. // Scheduler should be running.
testingScheduler.Start() testingScheduler.Start()
if !testingScheduler.IsRunning() { if !testingScheduler.IsRunning() {
t.Fatal("scheduler should be running") t.Fatal("scheduler should be running")
@ -32,7 +32,7 @@ func TestScanPolicyNotificationHandler(t *testing.T) {
t.Fatal("Handler does not work") t.Fatal("Handler does not work")
} }
//Policy parameter changed. // Policy parameter changed.
notification2 := ScanPolicyNotification{"daily", utcTime + 7200} notification2 := ScanPolicyNotification{"daily", utcTime + 7200}
if err := handler.Handle(notification2); err != nil { if err := handler.Handle(notification2); err != nil {
t.Fatal(err) t.Fatal(err)
@ -63,9 +63,9 @@ func TestScanPolicyNotificationHandler(t *testing.T) {
t.Fail() t.Fail()
} }
//Clear // Clear
testingScheduler.Stop() testingScheduler.Stop()
//Waiting for everything to be ready. // Waiting for everything to be ready.
<-time.After(1 * time.Second) <-time.After(1 * time.Second)
if testingScheduler.IsRunning() { if testingScheduler.IsRunning() {
t.Fatal("scheduler should be stopped") t.Fatal("scheduler should be stopped")

View File

@ -4,8 +4,8 @@ import (
"github.com/goharbor/harbor/src/common" "github.com/goharbor/harbor/src/common"
) )
//Define global topic names // Define global topic names
const ( const (
//ScanAllPolicyTopic is for notifying the change of scanning all policy. // ScanAllPolicyTopic is for notifying the change of scanning all policy.
ScanAllPolicyTopic = common.ScanAllPolicy ScanAllPolicyTopic = common.ScanAllPolicy
) )

View File

@ -14,52 +14,52 @@ const (
oneDay = 24 * 3600 oneDay = 24 * 3600
) )
//AlternatePolicyConfiguration stores the related configurations for the alternate policy. // AlternatePolicyConfiguration stores the related configurations for the alternate policy.
type AlternatePolicyConfiguration struct { type AlternatePolicyConfiguration struct {
//Duration is the interval of executing attached tasks. // Duration is the interval of executing attached tasks.
//E.g: 24*3600 for daily // E.g: 24*3600 for daily
// 7*24*3600 for weekly // 7*24*3600 for weekly
Duration time.Duration Duration time.Duration
//An integer to indicate the weekday of the week. Please note that Sunday is 7. // An integer to indicate the weekday of the week. Please note that Sunday is 7.
//Use the default value 0 to indicate that the weekday is not set. // Use the default value 0 to indicate that the weekday is not set.
//To support the weekly function. // To support the weekly function.
Weekday int8 Weekday int8
//OffsetTime is the execution time point of each turn // OffsetTime is the execution time point of each turn
//It's a number to indicate the seconds offset to the 00:00 of UTC time. // It's a number to indicate the seconds offset to the 00:00 of UTC time.
OffsetTime int64 OffsetTime int64
} }
//AlternatePolicy is a policy that repeatedly executes the attached tasks with the specified duration during a specified time scope. // AlternatePolicy is a policy that repeatedly executes the attached tasks with the specified duration during a specified time scope.
type AlternatePolicy struct { type AlternatePolicy struct {
//To sync the related operations. // To sync the related operations.
*sync.RWMutex *sync.RWMutex
//Keep the attached tasks. // Keep the attached tasks.
tasks task.Store tasks task.Store
//Policy configurations. // Policy configurations.
config *AlternatePolicyConfiguration config *AlternatePolicyConfiguration
//To indicate whether the policy is enabled or not. // To indicate whether the policy is enabled or not.
isEnabled bool isEnabled bool
//Channel used to send evaluation result signals. // Channel used to send evaluation result signals.
evaluation chan bool evaluation chan bool
//Channel used to notify policy termination. // Channel used to notify policy termination.
done chan bool done chan bool
//Channel used to receive terminate signal. // Channel used to receive terminate signal.
terminator chan bool terminator chan bool
//Unique name of this policy to support multiple instances // Unique name of this policy to support multiple instances
name string name string
} }
//NewAlternatePolicy is the constructor for creating an AlternatePolicy. // NewAlternatePolicy is the constructor for creating an AlternatePolicy.
//Accept name and configuration as parameters. // Accept name and configuration as parameters.
func NewAlternatePolicy(name string, config *AlternatePolicyConfiguration) *AlternatePolicy { func NewAlternatePolicy(name string, config *AlternatePolicyConfiguration) *AlternatePolicy {
return &AlternatePolicy{ return &AlternatePolicy{
RWMutex: new(sync.RWMutex), RWMutex: new(sync.RWMutex),
@ -71,27 +71,27 @@ func NewAlternatePolicy(name string, config *AlternatePolicyConfiguration) *Alte
} }
} }
//GetConfig returns the current configuration options of this policy. // GetConfig returns the current configuration options of this policy.
func (alp *AlternatePolicy) GetConfig() *AlternatePolicyConfiguration { func (alp *AlternatePolicy) GetConfig() *AlternatePolicyConfiguration {
return alp.config return alp.config
} }
//Name is an implementation of same method in policy interface. // Name is an implementation of same method in policy interface.
func (alp *AlternatePolicy) Name() string { func (alp *AlternatePolicy) Name() string {
return alp.name return alp.name
} }
//Tasks is an implementation of same method in policy interface. // Tasks is an implementation of same method in policy interface.
func (alp *AlternatePolicy) Tasks() []task.Task { func (alp *AlternatePolicy) Tasks() []task.Task {
return alp.tasks.GetTasks() return alp.tasks.GetTasks()
} }
//Done is an implementation of same method in policy interface. // Done is an implementation of same method in policy interface.
func (alp *AlternatePolicy) Done() <-chan bool { func (alp *AlternatePolicy) Done() <-chan bool {
return alp.done return alp.done
} }
//AttachTasks is an implementation of same method in policy interface. // AttachTasks is an implementation of same method in policy interface.
func (alp *AlternatePolicy) AttachTasks(tasks ...task.Task) error { func (alp *AlternatePolicy) AttachTasks(tasks ...task.Task) error {
if len(tasks) == 0 { if len(tasks) == 0 {
return errors.New("No tasks can be attached") return errors.New("No tasks can be attached")
@ -102,7 +102,7 @@ func (alp *AlternatePolicy) AttachTasks(tasks ...task.Task) error {
return nil return nil
} }
//Disable is an implementation of same method in policy interface. // Disable is an implementation of same method in policy interface.
func (alp *AlternatePolicy) Disable() error { func (alp *AlternatePolicy) Disable() error {
alp.Lock() alp.Lock()
if !alp.isEnabled { if !alp.isEnabled {
@ -110,33 +110,33 @@ func (alp *AlternatePolicy) Disable() error {
return fmt.Errorf("Instance of policy %s is not enabled", alp.Name()) return fmt.Errorf("Instance of policy %s is not enabled", alp.Name())
} }
//Set state to disabled // Set state to disabled
alp.isEnabled = false alp.isEnabled = false
alp.Unlock() alp.Unlock()
//Stop the evaluation goroutine // Stop the evaluation goroutine
alp.terminator <- true alp.terminator <- true
return nil return nil
} }
//Evaluate is an implementation of same method in policy interface. // Evaluate is an implementation of same method in policy interface.
func (alp *AlternatePolicy) Evaluate() (<-chan bool, error) { func (alp *AlternatePolicy) Evaluate() (<-chan bool, error) {
//Lock for state changing // Lock for state changing
defer alp.Unlock() defer alp.Unlock()
alp.Lock() alp.Lock()
//Check if configuration is valid // Check if configuration is valid
if !alp.isValidConfig() { if !alp.isValidConfig() {
return nil, errors.New("Policy configuration is not valid") return nil, errors.New("Policy configuration is not valid")
} }
//Check if policy instance is still running // Check if policy instance is still running
if alp.isEnabled { if alp.isEnabled {
return nil, fmt.Errorf("Instance of policy %s is still running", alp.Name()) return nil, fmt.Errorf("Instance of policy %s is still running", alp.Name())
} }
//Keep idempotent // Keep idempotent
if alp.evaluation != nil { if alp.evaluation != nil {
return alp.evaluation, nil return alp.evaluation, nil
} }
@ -150,8 +150,8 @@ func (alp *AlternatePolicy) Evaluate() (<-chan bool, error) {
) )
timeNow := time.Now().UTC() timeNow := time.Now().UTC()
//Reach the execution time point? // Reach the execution time point?
//Weekday is set // Weekday is set
if alp.config.Weekday > 0 { if alp.config.Weekday > 0 {
targetWeekday := (alp.config.Weekday + 7) % 7 targetWeekday := (alp.config.Weekday + 7) % 7
currentWeekday := timeNow.Weekday() currentWeekday := timeNow.Weekday()
@ -162,7 +162,7 @@ func (alp *AlternatePolicy) Evaluate() (<-chan bool, error) {
waitingTime = (int64)(weekdayDiff * oneDay) waitingTime = (int64)(weekdayDiff * oneDay)
} }
//Time // Time
utcTime := (int64)(timeNow.Hour()*3600 + timeNow.Minute()*60) utcTime := (int64)(timeNow.Hour()*3600 + timeNow.Minute()*60)
diff := alp.config.OffsetTime - utcTime diff := alp.config.OffsetTime - utcTime
if waitingTime > 0 { if waitingTime > 0 {
@ -174,9 +174,9 @@ func (alp *AlternatePolicy) Evaluate() (<-chan bool, error) {
} }
} }
//Let's wait for a while // Let's wait for a while
if waitingTime > 0 { if waitingTime > 0 {
//Wait for a while. // Wait for a while.
log.Infof("Waiting for %d seconds after comparing offset %d and utc time %d\n", diff, alp.config.OffsetTime, utcTime) log.Infof("Waiting for %d seconds after comparing offset %d and utc time %d\n", diff, alp.config.OffsetTime, utcTime)
select { select {
case <-time.After(time.Duration(waitingTime) * time.Second): case <-time.After(time.Duration(waitingTime) * time.Second):
@ -185,10 +185,10 @@ func (alp *AlternatePolicy) Evaluate() (<-chan bool, error) {
} }
} }
//Trigger the first tick. // Trigger the first tick.
alp.evaluation <- true alp.evaluation <- true
//Start the ticker for repeat checking. // Start the ticker for repeat checking.
tk := time.NewTicker(alp.config.Duration) tk := time.NewTicker(alp.config.Duration)
defer func() { defer func() {
if tk != nil { if tk != nil {
@ -208,13 +208,13 @@ func (alp *AlternatePolicy) Evaluate() (<-chan bool, error) {
} }
}() }()
//Enabled // Enabled
alp.isEnabled = true alp.isEnabled = true
return alp.evaluation, nil return alp.evaluation, nil
} }
//Equal is an implementation of same method in policy interface. // Equal is an implementation of same method in policy interface.
func (alp *AlternatePolicy) Equal(p Policy) bool { func (alp *AlternatePolicy) Equal(p Policy) bool {
if p == nil { if p == nil {
return false return false
@ -237,7 +237,7 @@ func (alp *AlternatePolicy) Equal(p Policy) bool {
cfg.Weekday == cfg2.Weekday) cfg.Weekday == cfg2.Weekday)
} }
//IsEnabled is an implementation of same method in policy interface. // IsEnabled is an implementation of same method in policy interface.
func (alp *AlternatePolicy) IsEnabled() bool { func (alp *AlternatePolicy) IsEnabled() bool {
defer alp.RUnlock() defer alp.RUnlock()
alp.RLock() alp.RLock()
@ -245,7 +245,7 @@ func (alp *AlternatePolicy) IsEnabled() bool {
return alp.isEnabled return alp.isEnabled
} }
//Check if the config is valid. At least it should have the configuration for supporting the daily policy. // Check if the config is valid. At least it should have the configuration for supporting the daily policy.
func (alp *AlternatePolicy) isValidConfig() bool { func (alp *AlternatePolicy) isValidConfig() bool {
return alp.config != nil && alp.config.Duration > 0 && alp.config.OffsetTime >= 0 return alp.config != nil && alp.config.Duration > 0 && alp.config.OffsetTime >= 0
} }
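
The sketch below drives an AlternatePolicy directly, outside the scheduler, to show how the configuration maps to behavior. The printTask type and the policy import path are assumptions for illustration; in the real flow the scheduler's watcher consumes the evaluation channel.

package main

import (
    "fmt"
    "time"

    "github.com/goharbor/harbor/src/common/scheduler/policy" // assumed import path
)

// printTask is a trivial task used only for this example.
type printTask struct{}

func (p *printTask) Name() string { return "print" }
func (p *printTask) Run() error   { fmt.Println("tick"); return nil }

func main() {
    // Run the attached tasks every 24 hours, 7200 seconds after 00:00 UTC (02:00 UTC).
    pl := policy.NewAlternatePolicy("demo", &policy.AlternatePolicyConfiguration{
        Duration:   24 * time.Hour,
        OffsetTime: 7200,
    })
    if err := pl.AttachTasks(&printTask{}); err != nil {
        panic(err)
    }

    // Evaluate starts the timing goroutine and returns the signal channel.
    ticks, err := pl.Evaluate()
    if err != nil {
        panic(err)
    }
    go func() {
        for range ticks {
            // The scheduler's watcher would run pl.Tasks() here.
            fmt.Println("evaluation signal received")
        }
    }()

    // ... later, release the evaluation goroutine.
    _ = pl.Disable()
}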

View File

@ -112,9 +112,9 @@ func TestDisablePolicy(t *testing.T) {
if tp.Disable() != nil { if tp.Disable() != nil {
t.Fatal("Failed to disable policy") t.Fatal("Failed to disable policy")
} }
//Waiting for everything to be stable // Waiting for everything to be stable
<-time.After(1 * time.Second) <-time.After(1 * time.Second)
//Copy value // Copy value
var copiedCounter int32 var copiedCounter int32
atomic.StoreInt32(&copiedCounter, atomic.LoadInt32(&counter)) atomic.StoreInt32(&copiedCounter, atomic.LoadInt32(&counter))
time.Sleep(2 * time.Second) time.Sleep(2 * time.Second)

View File

@ -4,45 +4,45 @@ import (
"github.com/goharbor/harbor/src/common/scheduler/task" "github.com/goharbor/harbor/src/common/scheduler/task"
) )
//Policy is an if-then logic to determine how the attached tasks should be // Policy is an if-then logic to determine how the attached tasks should be
//executed based on the evaluation result of the defined conditions. // executed based on the evaluation result of the defined conditions.
//E.g: // E.g:
// Daily execute TASK between 2017/06/24 and 2018/06/23 // Daily execute TASK between 2017/06/24 and 2018/06/23
// Execute TASK at 2017/09/01 14:30:00 // Execute TASK at 2017/09/01 14:30:00
// //
//Each policy should have a name to identify itself. // Each policy should have a name to identify itself.
//Please be aware that policy with no tasks will be treated as invalid. // Please be aware that policy with no tasks will be treated as invalid.
// //
type Policy interface { type Policy interface {
//Name will return the name of the policy. // Name will return the name of the policy.
//If the policy supports multiple instances, please make sure the name is unique as a UUID. // If the policy supports multiple instances, please make sure the name is unique as a UUID.
Name() string Name() string
//Tasks will return the attached tasks with this policy. // Tasks will return the attached tasks with this policy.
Tasks() []task.Task Tasks() []task.Task
//AttachTasks is to attach tasks to this policy // AttachTasks is to attach tasks to this policy
AttachTasks(...task.Task) error AttachTasks(...task.Task) error
//Done will setup a channel for other components to check whether or not // Done will setup a channel for other components to check whether or not
//the policy is completed. Possibly designed for the none loop policy. // the policy is completed. Possibly designed for the none loop policy.
Done() <-chan bool Done() <-chan bool
//Evaluate the policy based on its definition and return the result via // Evaluate the policy based on its definition and return the result via
//result channel. Policy is enabled after it is evaluated. // result channel. Policy is enabled after it is evaluated.
//Make sure Evaluate is idempotent; that means one policy can be enabled // Make sure Evaluate is idempotent; that means one policy can be enabled
//only once even if Evaluate is called more than once. // only once even if Evaluate is called more than once.
Evaluate() (<-chan bool, error) Evaluate() (<-chan bool, error)
//Disable the enabled policy and release all the allocated resources. // Disable the enabled policy and release all the allocated resources.
Disable() error Disable() error
//Equal will compare the two policies based on related factors, if any, such as configuration etc., // Equal will compare the two policies based on related factors, if any, such as configuration etc.,
//to determine whether the two policies are the same ones or not. Please pay attention that not every policy // to determine whether the two policies are the same ones or not. Please pay attention that not every policy
//needs to support this method. If there is no need, please directly return false to indicate that the // needs to support this method. If there is no need, please directly return false to indicate that the
//policies are different. // policies are different.
Equal(p Policy) bool Equal(p Policy) bool
//IsEnabled is to indicate whether the policy is enabled or not (disabled). // IsEnabled is to indicate whether the policy is enabled or not (disabled).
IsEnabled() bool IsEnabled() bool
} }
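
As a concrete example of the Equal contract, the scan-policy handler above compares an already scheduled AlternatePolicy with a freshly built candidate. A minimal sketch of that comparison, assuming the policy import path, is:

package main

import (
    "fmt"
    "time"

    "github.com/goharbor/harbor/src/common/scheduler/policy" // assumed import path
)

func main() {
    scheduled := policy.NewAlternatePolicy("demo", &policy.AlternatePolicyConfiguration{
        Duration:   24 * time.Hour,
        OffsetTime: 3600, // 01:00 UTC
    })
    candidate := policy.NewAlternatePolicy("demo", &policy.AlternatePolicyConfiguration{
        Duration:   24 * time.Hour,
        OffsetTime: 7200, // 02:00 UTC
    })

    // false: the offsets differ, so a handler would unschedule the old policy
    // and schedule the candidate instead.
    fmt.Println(scheduled.Equal(candidate))
}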

View File

@ -6,8 +6,8 @@ import (
"io" "io"
) )
//newUUID will generate a new UUID. // newUUID will generate a new UUID.
//Code copied from https://play.golang.org/p/4FkNSiUDMg // Code copied from https://play.golang.org/p/4FkNSiUDMg
func newUUID() (string, error) { func newUUID() (string, error) {
uuid := make([]byte, 16) uuid := make([]byte, 16)
n, err := io.ReadFull(rand.Reader, uuid) n, err := io.ReadFull(rand.Reader, uuid)

View File

@ -22,72 +22,72 @@ const (
statTaskFail = "Task Fail" statTaskFail = "Task Fail"
) )
//StatItem is defined for the stat metrics. // StatItem is defined for the stat metrics.
type StatItem struct { type StatItem struct {
//Metrics catalog // Metrics catalog
Type string Type string
//The stat value // The stat value
Value uint32 Value uint32
//Attach some other info // Attach some other info
Attachment interface{} Attachment interface{}
} }
//StatSummary is used to collect some metrics of scheduler. // StatSummary is used to collect some metrics of scheduler.
type StatSummary struct { type StatSummary struct {
//Count of scheduled policy // Count of scheduled policy
PolicyCount uint32 PolicyCount uint32
//Total count of tasks // Total count of tasks
Tasks uint32 Tasks uint32
//Count of successfully complete tasks // Count of successfully complete tasks
CompletedTasks uint32 CompletedTasks uint32
//Count of tasks with errors // Count of tasks with errors
TasksWithError uint32 TasksWithError uint32
} }
//Configuration defines configuration of Scheduler. // Configuration defines configuration of Scheduler.
type Configuration struct { type Configuration struct {
QueueSize uint8 QueueSize uint8
} }
//Scheduler is designed for scheduling policies. // Scheduler is designed for scheduling policies.
type Scheduler struct { type Scheduler struct {
//Mutex for sync controlling. // Mutex for sync controlling.
*sync.RWMutex *sync.RWMutex
//Related configuration options for scheduler. // Related configuration options for scheduler.
config *Configuration config *Configuration
//Store to keep the references of scheduled policies. // Store to keep the references of scheduled policies.
policies Store policies Store
//Queue for receiving policy scheduling request // Queue for receiving policy scheduling request
scheduleQueue chan *Watcher scheduleQueue chan *Watcher
//Queue for receiving policy unscheduling request or complete signal. // Queue for receiving policy unscheduling request or complete signal.
unscheduleQueue chan *Watcher unscheduleQueue chan *Watcher
//Channel for receiving stat metrics. // Channel for receiving stat metrics.
statChan chan *StatItem statChan chan *StatItem
//Channel for terminating the scheduler daemon. // Channel for terminating the scheduler daemon.
terminateChan chan bool terminateChan chan bool
//The stat metrics of scheduler. // The stat metrics of scheduler.
stats *StatSummary stats *StatSummary
//To indicate whether scheduler is running or not // To indicate whether scheduler is running or not
isRunning bool isRunning bool
} }
//DefaultScheduler is a default scheduler. // DefaultScheduler is a default scheduler.
var DefaultScheduler = NewScheduler(nil) var DefaultScheduler = NewScheduler(nil)
//NewScheduler is constructor for creating a scheduler. // NewScheduler is constructor for creating a scheduler.
func NewScheduler(config *Configuration) *Scheduler { func NewScheduler(config *Configuration) *Scheduler {
var qSize uint8 = defaultQueueSize var qSize uint8 = defaultQueueSize
if config != nil && config.QueueSize > 0 { if config != nil && config.QueueSize > 0 {
@ -118,12 +118,12 @@ func NewScheduler(config *Configuration) *Scheduler {
} }
} }
//Start the scheduler daemon. // Start the scheduler daemon.
func (sch *Scheduler) Start() { func (sch *Scheduler) Start() {
sch.Lock() sch.Lock()
defer sch.Unlock() defer sch.Unlock()
//If scheduler is already running // If scheduler is already running
if sch.isRunning { if sch.isRunning {
return return
} }
@ -135,32 +135,32 @@ func (sch *Scheduler) Start() {
} }
}() }()
defer func() { defer func() {
//Clear resources // Clear resources
sch.policies.Clear() sch.policies.Clear()
log.Infof("Policy scheduler stop at %s\n", time.Now().UTC().Format(time.RFC3339)) log.Infof("Policy scheduler stop at %s\n", time.Now().UTC().Format(time.RFC3339))
}() }()
for { for {
select { select {
case <-sch.terminateChan: case <-sch.terminateChan:
//Exit // Exit
return return
case wt := <-sch.scheduleQueue: case wt := <-sch.scheduleQueue:
//If status is stopped, no requests should be served // If status is stopped, no requests should be served
if !sch.IsRunning() { if !sch.IsRunning() {
continue continue
} }
go func(watcher *Watcher) { go func(watcher *Watcher) {
if watcher != nil && watcher.p != nil { if watcher != nil && watcher.p != nil {
//Enable it. // Enable it.
watcher.Start() watcher.Start()
//Update stats and log info. // Update stats and log info.
log.Infof("Policy %s is scheduled", watcher.p.Name()) log.Infof("Policy %s is scheduled", watcher.p.Name())
sch.statChan <- &StatItem{statSchedulePolicy, 1, nil} sch.statChan <- &StatItem{statSchedulePolicy, 1, nil}
} }
}(wt) }(wt)
case wt := <-sch.unscheduleQueue: case wt := <-sch.unscheduleQueue:
//If status is stopped, no requests should be served // If status is stopped, no requests should be served
if !sch.IsRunning() { if !sch.IsRunning() {
continue continue
} }
@ -168,14 +168,14 @@ func (sch *Scheduler) Start() {
if watcher != nil && watcher.IsRunning() { if watcher != nil && watcher.IsRunning() {
watcher.Stop() watcher.Stop()
//Update stats and log info. // Update stats and log info.
log.Infof("Policy %s is unscheduled", watcher.p.Name()) log.Infof("Policy %s is unscheduled", watcher.p.Name())
sch.statChan <- &StatItem{statUnSchedulePolicy, 1, nil} sch.statChan <- &StatItem{statUnSchedulePolicy, 1, nil}
} }
}(wt) }(wt)
case stat := <-sch.statChan: case stat := <-sch.statChan:
{ {
//If status is stopped, no requests should be served // If status is stopped, no requests should be served
if !sch.IsRunning() { if !sch.IsRunning() {
continue continue
} }
@ -218,12 +218,12 @@ func (sch *Scheduler) Start() {
log.Infof("Policy scheduler start at %s\n", time.Now().UTC().Format(time.RFC3339)) log.Infof("Policy scheduler start at %s\n", time.Now().UTC().Format(time.RFC3339))
} }
//Stop the scheduler daemon. // Stop the scheduler daemon.
func (sch *Scheduler) Stop() { func (sch *Scheduler) Stop() {
//Lock for state changing // Lock for state changing
sch.Lock() sch.Lock()
//Check if the scheduler is running // Check if the scheduler is running
if !sch.isRunning { if !sch.isRunning {
sch.Unlock() sch.Unlock()
return return
@ -232,11 +232,11 @@ func (sch *Scheduler) Stop() {
sch.isRunning = false sch.isRunning = false
sch.Unlock() sch.Unlock()
//Terminate the daemon to stop receiving signals. // Terminate the daemon to stop receiving signals.
sch.terminateChan <- true sch.terminateChan <- true
} }
//Schedule and enable the policy. // Schedule and enable the policy.
func (sch *Scheduler) Schedule(scheduledPolicy policy.Policy) error { func (sch *Scheduler) Schedule(scheduledPolicy policy.Policy) error {
if scheduledPolicy == nil { if scheduledPolicy == nil {
return errors.New("nil is not Policy object") return errors.New("nil is not Policy object")
@ -251,38 +251,38 @@ func (sch *Scheduler) Schedule(scheduledPolicy policy.Policy) error {
return errors.New("Policy must attach task(s)") return errors.New("Policy must attach task(s)")
} }
//Try to schedule the policy. // Try to schedule the policy.
//Keep the policy for future use after it's successfully scheduled. // Keep the policy for future use after it's successfully scheduled.
watcher := NewWatcher(scheduledPolicy, sch.statChan, sch.unscheduleQueue) watcher := NewWatcher(scheduledPolicy, sch.statChan, sch.unscheduleQueue)
if err := sch.policies.Put(scheduledPolicy.Name(), watcher); err != nil { if err := sch.policies.Put(scheduledPolicy.Name(), watcher); err != nil {
return err return err
} }
//Schedule the policy // Schedule the policy
sch.scheduleQueue <- watcher sch.scheduleQueue <- watcher
return nil return nil
} }
//UnSchedule the specified policy from the enabled policies list. // UnSchedule the specified policy from the enabled policies list.
func (sch *Scheduler) UnSchedule(policyName string) error { func (sch *Scheduler) UnSchedule(policyName string) error {
if strings.TrimSpace(policyName) == "" { if strings.TrimSpace(policyName) == "" {
return errors.New("Empty policy name is invalid") return errors.New("Empty policy name is invalid")
} }
//Find the watcher. // Find the watcher.
watcher := sch.policies.Remove(policyName) watcher := sch.policies.Remove(policyName)
if watcher == nil { if watcher == nil {
return fmt.Errorf("Policy %s is not existing", policyName) return fmt.Errorf("Policy %s is not existing", policyName)
} }
//Unschedule the policy. // Unschedule the policy.
sch.unscheduleQueue <- watcher sch.unscheduleQueue <- watcher
return nil return nil
} }
//IsRunning to indicate whether the scheduler is running. // IsRunning to indicate whether the scheduler is running.
func (sch *Scheduler) IsRunning() bool { func (sch *Scheduler) IsRunning() bool {
sch.RLock() sch.RLock()
defer sch.RUnlock() defer sch.RUnlock()
@ -290,12 +290,12 @@ func (sch *Scheduler) IsRunning() bool {
return sch.isRunning return sch.isRunning
} }
//HasScheduled is to check whether the given policy has been scheduled or not. // HasScheduled is to check whether the given policy has been scheduled or not.
func (sch *Scheduler) HasScheduled(policyName string) bool { func (sch *Scheduler) HasScheduled(policyName string) bool {
return sch.policies.Exists(policyName) return sch.policies.Exists(policyName)
} }
//GetPolicy is used to get related policy reference by its name. // GetPolicy is used to get related policy reference by its name.
func (sch *Scheduler) GetPolicy(policyName string) policy.Policy { func (sch *Scheduler) GetPolicy(policyName string) policy.Policy {
wk := sch.policies.Get(policyName) wk := sch.policies.Get(policyName)
if wk != nil { if wk != nil {
@ -305,7 +305,7 @@ func (sch *Scheduler) GetPolicy(policyName string) policy.Policy {
return nil return nil
} }
//PolicyCount returns the count of currently scheduled policies in the scheduler. // PolicyCount returns the count of currently scheduled policies in the scheduler.
func (sch *Scheduler) PolicyCount() uint32 { func (sch *Scheduler) PolicyCount() uint32 {
return sch.policies.Size() return sch.policies.Size()
} }
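
A usage sketch of the scheduler daemon follows. The scheduler and policy import paths are assumed from the repository layout, and noopTask is a hypothetical task added only to satisfy the "policy must attach task(s)" check.

package main

import (
    "time"

    "github.com/goharbor/harbor/src/common/scheduler"        // assumed import path
    "github.com/goharbor/harbor/src/common/scheduler/policy" // assumed import path
)

// noopTask is a hypothetical task that does nothing.
type noopTask struct{}

func (n *noopTask) Name() string { return "noop" }
func (n *noopTask) Run() error   { return nil }

func main() {
    // Start the daemon loop that serves schedule/unschedule requests.
    scheduler.DefaultScheduler.Start()

    pl := policy.NewAlternatePolicy("nightly-scan", &policy.AlternatePolicyConfiguration{
        Duration:   24 * time.Hour,
        OffsetTime: 0, // 00:00 UTC
    })
    if err := pl.AttachTasks(&noopTask{}); err != nil {
        panic(err)
    }
    if err := scheduler.DefaultScheduler.Schedule(pl); err != nil {
        panic(err)
    }

    // ... later, tear everything down.
    _ = scheduler.DefaultScheduler.UnSchedule("nightly-scan")
    scheduler.DefaultScheduler.Stop()
}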

View File

@ -7,46 +7,46 @@ import (
"sync" "sync"
) )
//Store defines the basic operations for storing and managing policy watchers. // Store defines the basic operations for storing and managing policy watchers.
type Store interface { type Store interface {
//Put a new policy in. // Put a new policy in.
Put(key string, value *Watcher) error Put(key string, value *Watcher) error
//Get the corresponding policy with the key. // Get the corresponding policy with the key.
Get(key string) *Watcher Get(key string) *Watcher
//Exists is to check if the key existing in the store. // Exists is to check if the key existing in the store.
Exists(key string) bool Exists(key string) bool
//Remove the specified policy and return its reference. // Remove the specified policy and return its reference.
Remove(key string) *Watcher Remove(key string) *Watcher
//Size returns the total count of items in the store. // Size returns the total count of items in the store.
Size() uint32 Size() uint32
//GetAll is to get all the items in the store. // GetAll is to get all the items in the store.
GetAll() []*Watcher GetAll() []*Watcher
//Clear store. // Clear store.
Clear() Clear()
} }
//DefaultStore implements the Store interface to keep the scheduled policies. // DefaultStore implements the Store interface to keep the scheduled policies.
//It does not support concurrent sync. // It does not support concurrent sync.
type DefaultStore struct { type DefaultStore struct {
//Support sync locking // Support sync locking
*sync.RWMutex *sync.RWMutex
//Map used to keep the policy list. // Map used to keep the policy list.
data map[string]*Watcher data map[string]*Watcher
} }
//NewDefaultStore is used to create a new store and return the pointer reference. // NewDefaultStore is used to create a new store and return the pointer reference.
func NewDefaultStore() *DefaultStore { func NewDefaultStore() *DefaultStore {
return &DefaultStore{new(sync.RWMutex), make(map[string]*Watcher)} return &DefaultStore{new(sync.RWMutex), make(map[string]*Watcher)}
} }
//Put a policy into store. // Put a policy into store.
func (cs *DefaultStore) Put(key string, value *Watcher) error { func (cs *DefaultStore) Put(key string, value *Watcher) error {
if strings.TrimSpace(key) == "" || value == nil { if strings.TrimSpace(key) == "" || value == nil {
return errors.New("Bad arguments") return errors.New("Bad arguments")
@ -64,7 +64,7 @@ func (cs *DefaultStore) Put(key string, value *Watcher) error {
return nil return nil
} }
//Get policy via key. // Get policy via key.
func (cs *DefaultStore) Get(key string) *Watcher { func (cs *DefaultStore) Get(key string) *Watcher {
if strings.TrimSpace(key) == "" { if strings.TrimSpace(key) == "" {
return nil return nil
@ -76,7 +76,7 @@ func (cs *DefaultStore) Get(key string) *Watcher {
return cs.data[key] return cs.data[key]
} }
//Exists is used to check whether or not the key exists in store. // Exists is used to check whether or not the key exists in store.
func (cs *DefaultStore) Exists(key string) bool { func (cs *DefaultStore) Exists(key string) bool {
if strings.TrimSpace(key) == "" { if strings.TrimSpace(key) == "" {
return false return false
@ -90,7 +90,7 @@ func (cs *DefaultStore) Exists(key string) bool {
return ok return ok
} }
//Remove is to delete the specified policy. // Remove is to delete the specified policy.
func (cs *DefaultStore) Remove(key string) *Watcher { func (cs *DefaultStore) Remove(key string) *Watcher {
if strings.TrimSpace(key) == "" { if strings.TrimSpace(key) == "" {
return nil return nil
@ -107,7 +107,7 @@ func (cs *DefaultStore) Remove(key string) *Watcher {
return nil return nil
} }
//Size returns the total count of items in the store. // Size returns the total count of items in the store.
func (cs *DefaultStore) Size() uint32 { func (cs *DefaultStore) Size() uint32 {
cs.RLock() cs.RLock()
defer cs.RUnlock() defer cs.RUnlock()
@ -115,7 +115,7 @@ func (cs *DefaultStore) Size() uint32 {
return (uint32)(len(cs.data)) return (uint32)(len(cs.data))
} }
//GetAll to get all the items of store. // GetAll to get all the items of store.
func (cs *DefaultStore) GetAll() []*Watcher { func (cs *DefaultStore) GetAll() []*Watcher {
cs.RLock() cs.RLock()
defer cs.RUnlock() defer cs.RUnlock()
@ -129,7 +129,7 @@ func (cs *DefaultStore) GetAll() []*Watcher {
return all return all
} }
//Clear all the items in store. // Clear all the items in store.
func (cs *DefaultStore) Clear() { func (cs *DefaultStore) Clear() {
cs.Lock() cs.Lock()
defer cs.Unlock() defer cs.Unlock()
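
To make the Store contract concrete, a small sketch in the same package might look like this. storeSketch is a hypothetical function, and the nil arguments to NewWatcher are acceptable here only because the watcher is never started.

package scheduler

// storeSketch shows the basic Put/Get/Exists/Remove flow of the watcher store.
func storeSketch() {
    s := NewDefaultStore()

    // Keys must be non-empty and values non-nil, otherwise Put returns an error.
    w := NewWatcher(nil, nil, nil)
    if err := s.Put("nightly-scan", w); err != nil {
        return
    }

    _ = s.Exists("nightly-scan") // true
    _ = s.Get("nightly-scan")    // the same *Watcher
    _ = s.Remove("nightly-scan") // removes it and hands back the reference
    _ = s.Size()                 // back to 0
}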

View File

@ -88,7 +88,7 @@ func (ft *fakeTask) Number() int32 {
return atomic.LoadInt32(&(ft.number)) return atomic.LoadInt32(&(ft.number))
} }
//Watcher will be tested together with the scheduler. // Watcher will be tested together with the scheduler.
func TestScheduler(t *testing.T) { func TestScheduler(t *testing.T) {
DefaultScheduler.Start() DefaultScheduler.Start()
if DefaultScheduler.policies.Size() != 0 { if DefaultScheduler.policies.Size() != 0 {

View File

@ -6,24 +6,24 @@ import (
"github.com/goharbor/harbor/src/replication/event/topic" "github.com/goharbor/harbor/src/replication/event/topic"
) )
//Task is the task for triggering one replication // Task is the task for triggering one replication
type Task struct { type Task struct {
PolicyID int64 PolicyID int64
} }
//NewTask is the constructor for creating a replication Task // NewTask is the constructor for creating a replication Task
func NewTask(policyID int64) *Task { func NewTask(policyID int64) *Task {
return &Task{ return &Task{
PolicyID: policyID, PolicyID: policyID,
} }
} }
//Name returns the name of this task // Name returns the name of this task
func (t *Task) Name() string { func (t *Task) Name() string {
return "replication" return "replication"
} }
//Run the actions here // Run the actions here
func (t *Task) Run() error { func (t *Task) Run() error {
return notifier.Publish(topic.StartReplicationTopic, notification.StartReplicationNotification{ return notifier.Publish(topic.StartReplicationTopic, notification.StartReplicationNotification{
PolicyID: t.PolicyID, PolicyID: t.PolicyID,

View File

@ -4,20 +4,20 @@ import (
"github.com/goharbor/harbor/src/ui/utils" "github.com/goharbor/harbor/src/ui/utils"
) )
//ScanAllTask is task of scanning all tags. // ScanAllTask is task of scanning all tags.
type ScanAllTask struct{} type ScanAllTask struct{}
//NewScanAllTask is the constructor for creating a ScanAllTask. // NewScanAllTask is the constructor for creating a ScanAllTask.
func NewScanAllTask() *ScanAllTask { func NewScanAllTask() *ScanAllTask {
return &ScanAllTask{} return &ScanAllTask{}
} }
//Name returns the name of the task. // Name returns the name of the task.
func (sat *ScanAllTask) Name() string { func (sat *ScanAllTask) Name() string {
return "scan all" return "scan all"
} }
//Run the actions. // Run the actions.
func (sat *ScanAllTask) Run() error { func (sat *ScanAllTask) Run() error {
return utils.ScanAllImages() return utils.ScanAllImages()
} }

View File

@ -1,10 +1,10 @@
package task package task
//Task is used to synchronously run specific action(s). // Task is used to synchronously run specific action(s).
type Task interface { type Task interface {
//Name should return the name of the task. // Name should return the name of the task.
Name() string Name() string
//Run the concrete code here // Run the concrete code here
Run() error Run() error
} }

View File

@ -4,30 +4,30 @@ import (
"sync" "sync"
) )
//Store is designed to keep the tasks. // Store is designed to keep the tasks.
type Store interface { type Store interface {
//GetTasks returns the current list of tasks in the store. // GetTasks returns the current list of tasks in the store.
GetTasks() []Task GetTasks() []Task
//AddTasks is used to append tasks to the list. // AddTasks is used to append tasks to the list.
AddTasks(tasks ...Task) AddTasks(tasks ...Task)
} }
//DefaultStore is the default implementation of the Store interface. // DefaultStore is the default implementation of the Store interface.
type DefaultStore struct { type DefaultStore struct {
//To sync the related operations. // To sync the related operations.
*sync.RWMutex *sync.RWMutex
//The space to keep the tasks. // The space to keep the tasks.
tasks []Task tasks []Task
} }
//NewDefaultStore is constructor method for DefaultStore. // NewDefaultStore is constructor method for DefaultStore.
func NewDefaultStore() *DefaultStore { func NewDefaultStore() *DefaultStore {
return &DefaultStore{new(sync.RWMutex), []Task{}} return &DefaultStore{new(sync.RWMutex), []Task{}}
} }
//GetTasks implements the same method in Store interface. // GetTasks implements the same method in Store interface.
func (ds *DefaultStore) GetTasks() []Task { func (ds *DefaultStore) GetTasks() []Task {
copyList := []Task{} copyList := []Task{}
@ -41,9 +41,9 @@ func (ds *DefaultStore) GetTasks() []Task {
return copyList return copyList
} }
//AddTasks implements the same method in Store interface. // AddTasks implements the same method in Store interface.
func (ds *DefaultStore) AddTasks(tasks ...Task) { func (ds *DefaultStore) AddTasks(tasks ...Task) {
//Double confirm. // Double confirm.
if ds.tasks == nil { if ds.tasks == nil {
ds.tasks = []Task{} ds.tasks = []Task{}
} }
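
For completeness, a sketch of consuming this task store from outside the package. The import path is the one shown in the policy interface file above, assuming the store lives in the scheduler/task package alongside the Task interface; helloTask is a hypothetical task for illustration.

package main

import (
    "fmt"

    "github.com/goharbor/harbor/src/common/scheduler/task"
)

// helloTask is a hypothetical task used only for this illustration.
type helloTask struct{}

func (h *helloTask) Name() string { return "hello" }
func (h *helloTask) Run() error   { fmt.Println("hello"); return nil }

func main() {
    store := task.NewDefaultStore()
    store.AddTasks(&helloTask{}, &helloTask{})

    // GetTasks hands back a copied slice, so callers cannot mutate the internal list.
    fmt.Println(len(store.GetTasks())) // 2
}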

View File

@ -9,28 +9,28 @@ import (
"sync" "sync"
) )
//Watcher is an asynchronous runner to provide an evaluation environment for the policy. // Watcher is an asynchronous runner to provide an evaluation environment for the policy.
type Watcher struct { type Watcher struct {
//Locker to sync related operations. // Locker to sync related operations.
*sync.RWMutex *sync.RWMutex
//The target policy. // The target policy.
p policy.Policy p policy.Policy
//The channel for receiving the stop signal. // The channel for receiving the stop signal.
cmdChan chan bool cmdChan chan bool
//Indicate whether the watcher is started and running. // Indicate whether the watcher is started and running.
isRunning bool isRunning bool
//Report stats to scheduler. // Report stats to scheduler.
stats chan *StatItem stats chan *StatItem
//If policy is automatically completed, report the policy to scheduler. // If policy is automatically completed, report the policy to scheduler.
doneChan chan *Watcher doneChan chan *Watcher
} }
//NewWatcher is used as a constructor. // NewWatcher is used as a constructor.
func NewWatcher(p policy.Policy, st chan *StatItem, done chan *Watcher) *Watcher { func NewWatcher(p policy.Policy, st chan *StatItem, done chan *Watcher) *Watcher {
return &Watcher{ return &Watcher{
RWMutex: new(sync.RWMutex), RWMutex: new(sync.RWMutex),
@ -42,9 +42,9 @@ func NewWatcher(p policy.Policy, st chan *StatItem, done chan *Watcher) *Watcher
} }
} }
//Start the running. // Start the running.
func (wc *Watcher) Start() { func (wc *Watcher) Start() {
//Lock for state changing // Lock for state changing
wc.Lock() wc.Lock()
defer wc.Unlock() defer wc.Unlock()
@ -74,13 +74,13 @@ func (wc *Watcher) Start() {
select { select {
case <-evalChan: case <-evalChan:
{ {
//If the worker is not running, it should not respond to any requests. // If the worker is not running, it should not respond to any requests.
if !wc.IsRunning() { if !wc.IsRunning() {
continue continue
} }
log.Infof("Receive evaluation signal from policy '%s'\n", pl.Name()) log.Infof("Receive evaluation signal from policy '%s'\n", pl.Name())
//Start to run the attached tasks. // Start to run the attached tasks.
for _, t := range pl.Tasks() { for _, t := range pl.Tasks() {
go func(tk task.Task) { go func(tk task.Task) {
defer func() { defer func() {
@ -93,7 +93,7 @@ func (wc *Watcher) Start() {
}() }()
err := tk.Run() err := tk.Run()
//Report task execution stats. // Report task execution stats.
st := &StatItem{statTaskComplete, 1, err} st := &StatItem{statTaskComplete, 1, err}
if err != nil { if err != nil {
st.Type = statTaskFail st.Type = statTaskFail
@ -103,7 +103,7 @@ func (wc *Watcher) Start() {
} }
}(t) }(t)
//Report task run stats. // Report task run stats.
st := &StatItem{statTaskRun, 1, nil} st := &StatItem{statTaskRun, 1, nil}
if wc.stats != nil { if wc.stats != nil {
wc.stats <- st wc.stats <- st
@ -112,8 +112,8 @@ func (wc *Watcher) Start() {
} }
case <-done: case <-done:
{ {
//Policy is automatically completed. // Policy is automatically completed.
//Report policy change stats. // Report policy change stats.
if wc.doneChan != nil { if wc.doneChan != nil {
wc.doneChan <- wc wc.doneChan <- wc
} }
@ -121,7 +121,7 @@ func (wc *Watcher) Start() {
return return
} }
case <-wc.cmdChan: case <-wc.cmdChan:
//Exit goroutine. // Exit goroutine.
return return
} }
} }
@ -130,9 +130,9 @@ func (wc *Watcher) Start() {
wc.isRunning = true wc.isRunning = true
} }
//Stop the running. // Stop the running.
func (wc *Watcher) Stop() { func (wc *Watcher) Stop() {
//Lock for state changing // Lock for state changing
wc.Lock() wc.Lock()
if !wc.isRunning { if !wc.isRunning {
wc.Unlock() wc.Unlock()
@ -142,18 +142,18 @@ func (wc *Watcher) Stop() {
wc.isRunning = false wc.isRunning = false
wc.Unlock() wc.Unlock()
//Disable policy. // Disable policy.
if wc.p != nil { if wc.p != nil {
wc.p.Disable() wc.p.Disable()
} }
//Stop watcher. // Stop watcher.
wc.cmdChan <- true wc.cmdChan <- true
log.Infof("Worker for policy %s is stopped.\n", wc.p.Name()) log.Infof("Worker for policy %s is stopped.\n", wc.p.Name())
} }
//IsRunning to indicate if the watcher is still running. // IsRunning to indicate if the watcher is still running.
func (wc *Watcher) IsRunning() bool { func (wc *Watcher) IsRunning() bool {
wc.RLock() wc.RLock()
defer wc.RUnlock() defer wc.RUnlock()
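A sketch of driving a Watcher, assuming the caller already has a policy.Policy value; the channel sizes and the wrapper function are illustrative only.

func watchPolicy(pl policy.Policy) *Watcher {
    stats := make(chan *StatItem, 16) // receives task run/complete/fail stats
    done := make(chan *Watcher, 1)    // receives the watcher when the policy completes on its own

    wc := NewWatcher(pl, stats, done)
    wc.Start()

    // Call wc.Stop() when the policy should no longer be evaluated.
    return wc
}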

View File

@ -20,12 +20,12 @@ import (
"strings" "strings"
) )
//HeaderPrefix is the prefix of the value of Authorization header. // HeaderPrefix is the prefix of the value of Authorization header.
//It includes the trailing space. // It includes the trailing space.
const HeaderPrefix = "Harbor-Secret " const HeaderPrefix = "Harbor-Secret "
//FromRequest tries to get Harbor Secret from request header. // FromRequest tries to get Harbor Secret from request header.
//It will return an empty string if the request is nil. // It will return an empty string if the request is nil.
func FromRequest(req *http.Request) string { func FromRequest(req *http.Request) string {
if req == nil { if req == nil {
return "" return ""
@ -37,7 +37,7 @@ func FromRequest(req *http.Request) string {
return "" return ""
} }
//AddToRequest adds the secret to the request // AddToRequest adds the secret to the request
func AddToRequest(req *http.Request, secret string) error { func AddToRequest(req *http.Request, secret string) error {
if req == nil { if req == nil {
return fmt.Errorf("input request is nil, unable to set secret") return fmt.Errorf("input request is nil, unable to set secret")

View File

@ -34,8 +34,8 @@ type Context interface {
HasWritePerm(projectIDOrName interface{}) bool HasWritePerm(projectIDOrName interface{}) bool
// HasAllPerm returns whether the user has all permissions to the project // HasAllPerm returns whether the user has all permissions to the project
HasAllPerm(projectIDOrName interface{}) bool HasAllPerm(projectIDOrName interface{}) bool
//Get current user's all project // Get current user's all project
GetMyProjects() ([]*models.Project, error) GetMyProjects() ([]*models.Project, error)
//Get user's role in provided project // Get user's role in provided project
GetProjectRoles(projectIDOrName interface{}) []int GetProjectRoles(projectIDOrName interface{}) []int
} }

View File

@ -184,11 +184,11 @@ func (s *SecurityContext) GetRolesByGroup(projectIDOrName interface{}) []int {
var roles []int var roles []int
user := s.user user := s.user
project, err := s.pm.Get(projectIDOrName) project, err := s.pm.Get(projectIDOrName)
//No user, group or project info // No user, group or project info
if err != nil || project == nil || user == nil || len(user.GroupList) == 0 { if err != nil || project == nil || user == nil || len(user.GroupList) == 0 {
return roles return roles
} }
//Get role by LDAP group // Get role by LDAP group
groupDNConditions := group.GetGroupDNQueryCondition(user.GroupList) groupDNConditions := group.GetGroupDNQueryCondition(user.GroupList)
roles, err = dao.GetRolesByLDAPGroup(project.ProjectID, groupDNConditions) roles, err = dao.GetRolesByLDAPGroup(project.ProjectID, groupDNConditions)
if err != nil { if err != nil {

View File

@ -28,7 +28,7 @@ func TestIsAuthenticated(t *testing.T) {
isAuthenticated := context.IsAuthenticated() isAuthenticated := context.IsAuthenticated()
assert.False(t, isAuthenticated) assert.False(t, isAuthenticated)
//invalid secret // invalid secret
context = NewSecurityContext("invalid_secret", context = NewSecurityContext("invalid_secret",
secret.NewStore(map[string]string{ secret.NewStore(map[string]string{
"secret": "username", "secret": "username",
@ -36,7 +36,7 @@ func TestIsAuthenticated(t *testing.T) {
isAuthenticated = context.IsAuthenticated() isAuthenticated = context.IsAuthenticated()
assert.False(t, isAuthenticated) assert.False(t, isAuthenticated)
//valid secret // valid secret
context = NewSecurityContext("secret", context = NewSecurityContext("secret",
secret.NewStore(map[string]string{ secret.NewStore(map[string]string{
"secret": "username", "secret": "username",
@ -51,7 +51,7 @@ func TestGetUsername(t *testing.T) {
username := context.GetUsername() username := context.GetUsername()
assert.Equal(t, "", username) assert.Equal(t, "", username)
//invalid secret // invalid secret
context = NewSecurityContext("invalid_secret", context = NewSecurityContext("invalid_secret",
secret.NewStore(map[string]string{ secret.NewStore(map[string]string{
"secret": "username", "secret": "username",
@ -59,7 +59,7 @@ func TestGetUsername(t *testing.T) {
username = context.GetUsername() username = context.GetUsername()
assert.Equal(t, "", username) assert.Equal(t, "", username)
//valid secret // valid secret
context = NewSecurityContext("secret", context = NewSecurityContext("secret",
secret.NewStore(map[string]string{ secret.NewStore(map[string]string{
"secret": "username", "secret": "username",
@ -101,7 +101,7 @@ func TestHasReadPerm(t *testing.T) {
hasReadPerm := context.HasReadPerm("project_name") hasReadPerm := context.HasReadPerm("project_name")
assert.False(t, hasReadPerm) assert.False(t, hasReadPerm)
//invalid secret // invalid secret
context = NewSecurityContext("invalid_secret", context = NewSecurityContext("invalid_secret",
secret.NewStore(map[string]string{ secret.NewStore(map[string]string{
"jobservice_secret": secret.JobserviceUser, "jobservice_secret": secret.JobserviceUser,
@ -109,7 +109,7 @@ func TestHasReadPerm(t *testing.T) {
hasReadPerm = context.HasReadPerm("project_name") hasReadPerm = context.HasReadPerm("project_name")
assert.False(t, hasReadPerm) assert.False(t, hasReadPerm)
//valid secret, project name // valid secret, project name
context = NewSecurityContext("jobservice_secret", context = NewSecurityContext("jobservice_secret",
secret.NewStore(map[string]string{ secret.NewStore(map[string]string{
"jobservice_secret": secret.JobserviceUser, "jobservice_secret": secret.JobserviceUser,
@ -117,7 +117,7 @@ func TestHasReadPerm(t *testing.T) {
hasReadPerm = context.HasReadPerm("project_name") hasReadPerm = context.HasReadPerm("project_name")
assert.True(t, hasReadPerm) assert.True(t, hasReadPerm)
//valid secret, project ID // valid secret, project ID
hasReadPerm = context.HasReadPerm(1) hasReadPerm = context.HasReadPerm(1)
assert.True(t, hasReadPerm) assert.True(t, hasReadPerm)
} }
@ -163,7 +163,7 @@ func TestGetMyProjects(t *testing.T) {
} }
func TestGetProjectRoles(t *testing.T) { func TestGetProjectRoles(t *testing.T) {
//invalid secret // invalid secret
context := NewSecurityContext("invalid_secret", context := NewSecurityContext("invalid_secret",
secret.NewStore(map[string]string{ secret.NewStore(map[string]string{
"jobservice_secret": secret.JobserviceUser, "jobservice_secret": secret.JobserviceUser,

View File

@ -30,7 +30,7 @@ import (
// Client communicates with clair endpoint to scan image and get detailed scan result // Client communicates with clair endpoint to scan image and get detailed scan result
type Client struct { type Client struct {
endpoint string endpoint string
//need to customize the logger to write output to job log. // need to customize the logger to write output to job log.
logger *log.Logger logger *log.Logger
client *http.Client client *http.Client
} }

View File

@ -22,7 +22,7 @@ import (
"strings" "strings"
) )
//var client = NewClient() // var client = NewClient()
// ParseClairSev parse the severity of clair to Harbor's Severity type if the string is not recognized the value will be set to unknown. // ParseClairSev parse the severity of clair to Harbor's Severity type if the string is not recognized the value will be set to unknown.
func ParseClairSev(clairSev string) models.Severity { func ParseClairSev(clairSev string) models.Severity {
@ -94,7 +94,7 @@ func transformVuln(clairVuln *models.ClairLayerEnvelope) (*models.ComponentsOver
}, overallSev }, overallSev
} }
//TransformVuln is for running scanning job in both job service V1 and V2. // TransformVuln is for running scanning job in both job service V1 and V2.
func TransformVuln(clairVuln *models.ClairLayerEnvelope) (*models.ComponentsOverview, models.Severity) { func TransformVuln(clairVuln *models.ClairLayerEnvelope) (*models.ComponentsOverview, models.Severity) {
return transformVuln(clairVuln) return transformVuln(clairVuln)
} }
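A usage sketch for the two exported helpers in this hunk; the envelope argument is assumed to have been fetched from Clair elsewhere.

func summarizeScan(envelope *models.ClairLayerEnvelope) {
    // Map a raw Clair severity string to Harbor's Severity type;
    // unrecognized strings fall back to unknown.
    _ = ParseClairSev("High")

    // Build the components overview and the overall severity for a scanned layer.
    overview, overall := TransformVuln(envelope)
    _, _ = overview, overall
}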

View File

@ -120,7 +120,7 @@ func newClient(addr, identity, username, password string,
return nil, err return nil, err
} }
//try to switch to SSL/TLS // try to switch to SSL/TLS
if !tls { if !tls {
if ok, _ := client.Extension("STARTTLS"); ok { if ok, _ := client.Extension("STARTTLS"); ok {
log.Debugf("switching the connection with %s to SSL/TLS ...", addr) log.Debugf("switching the connection with %s to SSL/TLS ...", addr)

View File

@ -38,9 +38,9 @@ func TestSend(t *testing.T) {
err := Send(addr, identity, username, password, err := Send(addr, identity, username, password,
timeout, tls, insecure, from, to, timeout, tls, insecure, from, to,
subject, message) subject, message)
//bypass the check due to security policy change on Gmail // bypass the check due to security policy change on Gmail
//TODO // TODO
//assert.Nil(t, err) // assert.Nil(t, err)
/*not work on travis /*not work on travis
// non-tls connection // non-tls connection
@ -52,7 +52,7 @@ func TestSend(t *testing.T) {
assert.Nil(t, err) assert.Nil(t, err)
*/ */
//invalid username/password // invalid username/password
username = "invalid_username" username = "invalid_username"
err = Send(addr, identity, username, password, err = Send(addr, identity, username, password,
timeout, tls, insecure, from, to, timeout, tls, insecure, from, to,
@ -78,9 +78,9 @@ func TestPing(t *testing.T) {
// tls connection // tls connection
err := Ping(addr, identity, username, password, err := Ping(addr, identity, username, password,
timeout, tls, insecure) timeout, tls, insecure)
//bypass the check due to security policy change on Gmail // bypass the check due to security policy change on Gmail
//TODO // TODO
//assert.Nil(t, err) // assert.Nil(t, err)
/*not work on travis /*not work on travis
// non-tls connection // non-tls connection
@ -91,7 +91,7 @@ func TestPing(t *testing.T) {
assert.Nil(t, err) assert.Nil(t, err)
*/ */
//invalid username/password // invalid username/password
username = "invalid_username" username = "invalid_username"
err = Ping(addr, identity, username, password, err = Ping(addr, identity, username, password,
timeout, tls, insecure) timeout, tls, insecure)

View File

@ -65,7 +65,7 @@ func ReversibleDecrypt(str, key string) (string, error) {
str = str[len(EncryptHeaderV1):] str = str[len(EncryptHeaderV1):]
return decryptAES(str, key) return decryptAES(str, key)
} }
//fallback to base64 // fallback to base64
return decodeB64(str) return decodeB64(str)
} }
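A brief sketch of the fallback behavior shown above; the stored value and key are placeholders supplied by the caller.

func decryptSecret(stored, key string) (string, error) {
    // Values prefixed with EncryptHeaderV1 are AES-decrypted;
    // anything else is treated as legacy base64 and simply decoded.
    return ReversibleDecrypt(stored, key)
}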

View File

@ -30,20 +30,20 @@ import (
goldap "gopkg.in/ldap.v2" goldap "gopkg.in/ldap.v2"
) )
//ErrNotFound ... // ErrNotFound ...
var ErrNotFound = errors.New("entity not found") var ErrNotFound = errors.New("entity not found")
//ErrDNSyntax ... // ErrDNSyntax ...
var ErrDNSyntax = errors.New("Invalid DN syntax") var ErrDNSyntax = errors.New("Invalid DN syntax")
//Session - define a LDAP session // Session - define a LDAP session
type Session struct { type Session struct {
ldapConfig models.LdapConf ldapConfig models.LdapConf
ldapGroupConfig models.LdapGroupConf ldapGroupConfig models.LdapGroupConf
ldapConn *goldap.Conn ldapConn *goldap.Conn
} }
//LoadSystemLdapConfig - load LDAP configure from adminserver // LoadSystemLdapConfig - load LDAP configure from adminserver
func LoadSystemLdapConfig() (*Session, error) { func LoadSystemLdapConfig() (*Session, error) {
authMode, err := config.AuthMode() authMode, err := config.AuthMode()
@ -71,7 +71,7 @@ func LoadSystemLdapConfig() (*Session, error) {
return CreateWithAllConfig(*ldapConf, *ldapGroupConfig) return CreateWithAllConfig(*ldapConf, *ldapGroupConfig)
} }
//CreateWithConfig - // CreateWithConfig -
func CreateWithConfig(ldapConf models.LdapConf) (*Session, error) { func CreateWithConfig(ldapConf models.LdapConf) (*Session, error) {
return CreateWithAllConfig(ldapConf, models.LdapGroupConf{}) return CreateWithAllConfig(ldapConf, models.LdapGroupConf{})
} }
@ -140,7 +140,7 @@ func formatURL(ldapURL string) (string, error) {
} }
//ConnectionTest - test ldap session connection with system default setting // ConnectionTest - test ldap session connection with system default setting
func (session *Session) ConnectionTest() error { func (session *Session) ConnectionTest() error {
session, err := LoadSystemLdapConfig() session, err := LoadSystemLdapConfig()
if err != nil { if err != nil {
@ -150,12 +150,12 @@ func (session *Session) ConnectionTest() error {
return ConnectionTestWithAllConfig(session.ldapConfig, session.ldapGroupConfig) return ConnectionTestWithAllConfig(session.ldapConfig, session.ldapGroupConfig)
} }
//ConnectionTestWithConfig - // ConnectionTestWithConfig -
func ConnectionTestWithConfig(ldapConfig models.LdapConf) error { func ConnectionTestWithConfig(ldapConfig models.LdapConf) error {
return ConnectionTestWithAllConfig(ldapConfig, models.LdapGroupConf{}) return ConnectionTestWithAllConfig(ldapConfig, models.LdapGroupConf{})
} }
//ConnectionTestWithAllConfig - test ldap session connection, out of the scope of normal session create/close // ConnectionTestWithAllConfig - test ldap session connection, out of the scope of normal session create/close
func ConnectionTestWithAllConfig(ldapConfig models.LdapConf, ldapGroupConfig models.LdapGroupConf) error { func ConnectionTestWithAllConfig(ldapConfig models.LdapConf, ldapGroupConfig models.LdapGroupConf) error {
authMode, err := config.AuthMode() authMode, err := config.AuthMode()
@ -164,7 +164,7 @@ func ConnectionTestWithAllConfig(ldapConfig models.LdapConf, ldapGroupConfig mod
return err return err
} }
//If no password present, use the system default password // If no password present, use the system default password
if ldapConfig.LdapSearchPassword == "" && authMode == "ldap_auth" { if ldapConfig.LdapSearchPassword == "" && authMode == "ldap_auth" {
session, err := LoadSystemLdapConfig() session, err := LoadSystemLdapConfig()
@ -199,7 +199,7 @@ func ConnectionTestWithAllConfig(ldapConfig models.LdapConf, ldapGroupConfig mod
return nil return nil
} }
//SearchUser - search LDAP user by name // SearchUser - search LDAP user by name
func (session *Session) SearchUser(username string) ([]models.LdapUser, error) { func (session *Session) SearchUser(username string) ([]models.LdapUser, error) {
var ldapUsers []models.LdapUser var ldapUsers []models.LdapUser
ldapFilter := session.createUserFilter(username) ldapFilter := session.createUserFilter(username)
@ -213,7 +213,7 @@ func (session *Session) SearchUser(username string) ([]models.LdapUser, error) {
var u models.LdapUser var u models.LdapUser
groupDNList := []string{} groupDNList := []string{}
for _, attr := range ldapEntry.Attributes { for _, attr := range ldapEntry.Attributes {
//OpenLDAP sometimes contains a leading space in the username // OpenLDAP sometimes contains a leading space in the username
val := strings.TrimSpace(attr.Values[0]) val := strings.TrimSpace(attr.Values[0])
log.Debugf("Current ldap entry attr name: %s\n", attr.Name) log.Debugf("Current ldap entry attr name: %s\n", attr.Name)
switch strings.ToLower(attr.Name) { switch strings.ToLower(attr.Name) {
@ -249,7 +249,7 @@ func (session *Session) Bind(dn string, password string) error {
return session.ldapConn.Bind(dn, password) return session.ldapConn.Bind(dn, password)
} }
//Open - open Session // Open - open Session
func (session *Session) Open() error { func (session *Session) Open() error {
splitLdapURL := strings.Split(session.ldapConfig.LdapURL, "://") splitLdapURL := strings.Split(session.ldapConfig.LdapURL, "://")
@ -305,9 +305,9 @@ func (session *Session) SearchLdapAttribute(baseDN, filter string, attributes []
baseDN, baseDN,
session.ldapConfig.LdapScope, session.ldapConfig.LdapScope,
goldap.NeverDerefAliases, goldap.NeverDerefAliases,
0, //Unlimited results 0, // Unlimited results
0, //Search Timeout 0, // Search Timeout
false, //Types only false, // Types only
filter, filter,
attributes, attributes,
nil, nil,
@ -329,7 +329,7 @@ func (session *Session) SearchLdapAttribute(baseDN, filter string, attributes []
} }
//createUserFilter - create the filter to search a user with the specified username // createUserFilter - create the filter to search a user with the specified username
func (session *Session) createUserFilter(username string) string { func (session *Session) createUserFilter(username string) string {
var filterTag string var filterTag string
@ -353,14 +353,14 @@ func (session *Session) createUserFilter(username string) string {
return ldapFilter return ldapFilter
} }
//Close - close current session // Close - close current session
func (session *Session) Close() { func (session *Session) Close() {
if session.ldapConn != nil { if session.ldapConn != nil {
session.ldapConn.Close() session.ldapConn.Close()
} }
} }
//SearchGroupByName ... // SearchGroupByName ...
func (session *Session) SearchGroupByName(groupName string) ([]models.LdapGroup, error) { func (session *Session) SearchGroupByName(groupName string) ([]models.LdapGroup, error) {
return session.searchGroup(session.ldapGroupConfig.LdapGroupBaseDN, return session.searchGroup(session.ldapGroupConfig.LdapGroupBaseDN,
session.ldapGroupConfig.LdapGroupFilter, session.ldapGroupConfig.LdapGroupFilter,
@ -368,7 +368,7 @@ func (session *Session) SearchGroupByName(groupName string) ([]models.LdapGroup,
session.ldapGroupConfig.LdapGroupNameAttribute) session.ldapGroupConfig.LdapGroupNameAttribute)
} }
//SearchGroupByDN ... // SearchGroupByDN ...
func (session *Session) SearchGroupByDN(groupDN string) ([]models.LdapGroup, error) { func (session *Session) SearchGroupByDN(groupDN string) ([]models.LdapGroup, error) {
if _, err := goldap.ParseDN(groupDN); err != nil { if _, err := goldap.ParseDN(groupDN); err != nil {
return nil, ErrDNSyntax return nil, ErrDNSyntax
@ -396,7 +396,7 @@ func (session *Session) searchGroup(baseDN, filter, groupName, groupNameAttribut
var group models.LdapGroup var group models.LdapGroup
group.GroupDN = ldapEntry.DN group.GroupDN = ldapEntry.DN
for _, attr := range ldapEntry.Attributes { for _, attr := range ldapEntry.Attributes {
//OpenLDAP sometimes contains a leading space in the username // OpenLDAP sometimes contains a leading space in the username
val := strings.TrimSpace(attr.Values[0]) val := strings.TrimSpace(attr.Values[0])
log.Debugf("Current ldap entry attr name: %s\n", attr.Name) log.Debugf("Current ldap entry attr name: %s\n", attr.Name)
switch strings.ToLower(attr.Name) { switch strings.ToLower(attr.Name) {
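A sketch of a typical session lifecycle, assuming an LdapConf with at least LdapURL populated; the URL is a placeholder and the remaining config fields are left at their defaults.

func searchLdapUser(username string) ([]models.LdapUser, error) {
    conf := models.LdapConf{LdapURL: "ldaps://ldap.example.local"}

    session, err := CreateWithConfig(conf)
    if err != nil {
        return nil, err
    }

    // Open the underlying connection and make sure it is closed afterwards.
    if err := session.Open(); err != nil {
        return nil, err
    }
    defer session.Close()

    // SearchUser builds the user filter and returns the matching LDAP users.
    return session.SearchUser(username)
}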

View File

@ -23,7 +23,7 @@ var adminServerLdapTestConfig = map[string]interface{}{
common.PostGreSQLUsername: "postgres", common.PostGreSQLUsername: "postgres",
common.PostGreSQLPassword: "root123", common.PostGreSQLPassword: "root123",
common.PostGreSQLDatabase: "registry", common.PostGreSQLDatabase: "registry",
//config.SelfRegistration: true, // config.SelfRegistration: true,
common.LDAPURL: "ldap://127.0.0.1", common.LDAPURL: "ldap://127.0.0.1",
common.LDAPSearchDN: "cn=admin,dc=example,dc=com", common.LDAPSearchDN: "cn=admin,dc=example,dc=com",
common.LDAPSearchPwd: "admin", common.LDAPSearchPwd: "admin",

View File

@ -64,12 +64,12 @@ func New(out io.Writer, fmtter Formatter, lvl Level) *Logger {
} }
} }
//DefaultLogger returns the default logger within the pkg, i.e. the one used in log.Infof.... // DefaultLogger returns the default logger within the pkg, i.e. the one used in log.Infof....
func DefaultLogger() *Logger { func DefaultLogger() *Logger {
return logger return logger
} }
//SetOutput sets the output of Logger l // SetOutput sets the output of Logger l
func (l *Logger) SetOutput(out io.Writer) { func (l *Logger) SetOutput(out io.Writer) {
l.mu.Lock() l.mu.Lock()
defer l.mu.Unlock() defer l.mu.Unlock()
@ -77,7 +77,7 @@ func (l *Logger) SetOutput(out io.Writer) {
l.out = out l.out = out
} }
//SetFormatter sets the formatter of Logger l // SetFormatter sets the formatter of Logger l
func (l *Logger) SetFormatter(fmtter Formatter) { func (l *Logger) SetFormatter(fmtter Formatter) {
l.mu.Lock() l.mu.Lock()
defer l.mu.Unlock() defer l.mu.Unlock()
@ -85,7 +85,7 @@ func (l *Logger) SetFormatter(fmtter Formatter) {
l.fmtter = fmtter l.fmtter = fmtter
} }
//SetLevel sets the level of Logger l // SetLevel sets the level of Logger l
func (l *Logger) SetLevel(lvl Level) { func (l *Logger) SetLevel(lvl Level) {
l.mu.Lock() l.mu.Lock()
defer l.mu.Unlock() defer l.mu.Unlock()
@ -93,17 +93,17 @@ func (l *Logger) SetLevel(lvl Level) {
l.lvl = lvl l.lvl = lvl
} }
//SetOutput sets the output of default Logger // SetOutput sets the output of default Logger
func SetOutput(out io.Writer) { func SetOutput(out io.Writer) {
logger.SetOutput(out) logger.SetOutput(out)
} }
//SetFormatter sets the formatter of default Logger // SetFormatter sets the formatter of default Logger
func SetFormatter(fmtter Formatter) { func SetFormatter(fmtter Formatter) {
logger.SetFormatter(fmtter) logger.SetFormatter(fmtter)
} }
//SetLevel sets the level of default Logger // SetLevel sets the level of default Logger
func SetLevel(lvl Level) { func SetLevel(lvl Level) {
logger.SetLevel(lvl) logger.SetLevel(lvl)
} }
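A small sketch of redirecting output via the package-level setter and the default logger; the io.Writer is whatever sink the caller chooses, and level/formatter values are omitted since their concrete types are defined elsewhere in the package.

func redirectLogs(w io.Writer) {
    // Redirect the default logger used by log.Infof and friends.
    SetOutput(w)

    // The same logger can also be fetched and configured directly.
    DefaultLogger().SetOutput(w)
}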

View File

@ -46,7 +46,7 @@ var (
type Target struct { type Target struct {
Tag string `json:"tag"` Tag string `json:"tag"`
Hashes data.Hashes `json:"hashes"` Hashes data.Hashes `json:"hashes"`
//TODO: update fields as needed. // TODO: update fields as needed.
} }
func init() { func init() {
@ -102,7 +102,7 @@ func GetTargets(notaryEndpoint string, username string, fqRepo string) ([]Target
} else if err != nil { } else if err != nil {
return res, err return res, err
} }
//Remove root.json such that when remote repository is removed the local cache can't be reused. // Remove root.json such that when remote repository is removed the local cache can't be reused.
rootJSON := path.Join(notaryCachePath, "tuf", fqRepo, "metadata/root.json") rootJSON := path.Join(notaryCachePath, "tuf", fqRepo, "metadata/root.json")
rmErr := os.Remove(rootJSON) rmErr := os.Remove(rootJSON)
if rmErr != nil { if rmErr != nil {

View File

@ -30,7 +30,7 @@ import (
) )
const ( const (
latency int = 10 //second, the network latency when token is received latency int = 10 // second, the network latency when token is received
scheme = "bearer" scheme = "bearer"
) )
@ -51,7 +51,7 @@ type tokenAuthorizer struct {
// add token to the request // add token to the request
func (t *tokenAuthorizer) Modify(req *http.Request) error { func (t *tokenAuthorizer) Modify(req *http.Request) error {
//only handle requests sent to registry // only handle requests sent to registry
goon, err := t.filterReq(req) goon, err := t.filterReq(req)
if err != nil { if err != nil {
return err return err

View File

@ -110,7 +110,7 @@ func (r *Registry) Catalog() ([]string, error) {
} }
repos = append(repos, catalogResp.Repositories...) repos = append(repos, catalogResp.Repositories...)
//Link: </v2/_catalog?last=library%2Fhello-world-25&n=100>; rel="next" // Link: </v2/_catalog?last=library%2Fhello-world-25&n=100>; rel="next"
link := resp.Header.Get("Link") link := resp.Header.Get("Link")
if strings.HasSuffix(link, `rel="next"`) && strings.Index(link, "<") >= 0 && strings.Index(link, ">") >= 0 { if strings.HasSuffix(link, `rel="next"`) && strings.Index(link, "<") >= 0 && strings.Index(link, ">") >= 0 {
suffix = link[strings.Index(link, "<")+1 : strings.Index(link, ">")] suffix = link[strings.Index(link, "<")+1 : strings.Index(link, ">")]

View File

@ -29,35 +29,35 @@ var (
once sync.Once once sync.Once
) )
//TimeMarker is used to control an action not to be taken frequently within the interval // TimeMarker is used to control an action not to be taken frequently within the interval
type TimeMarker struct { type TimeMarker struct {
sync.RWMutex sync.RWMutex
next time.Time next time.Time
interval time.Duration interval time.Duration
} }
//Mark sets the next allowed time to one interval after the time it is called. // Mark sets the next allowed time to one interval after the time it is called.
func (t *TimeMarker) Mark() { func (t *TimeMarker) Mark() {
t.Lock() t.Lock()
defer t.Unlock() defer t.Unlock()
t.next = time.Now().Add(t.interval) t.next = time.Now().Add(t.interval)
} }
//Check returns true if the current time is after the mark set by this marker, meaning the action the mark guards can be taken. // Check returns true if the current time is after the mark set by this marker, meaning the action the mark guards can be taken.
func (t *TimeMarker) Check() bool { func (t *TimeMarker) Check() bool {
t.RLock() t.RLock()
defer t.RUnlock() defer t.RUnlock()
return time.Now().After(t.next) return time.Now().After(t.next)
} }
//Next returns the time of the next mark. // Next returns the time of the next mark.
func (t *TimeMarker) Next() time.Time { func (t *TimeMarker) Next() time.Time {
t.RLock() t.RLock()
defer t.RUnlock() defer t.RUnlock()
return t.next return t.next
} }
//ScanAllMarker ... // ScanAllMarker ...
func ScanAllMarker() *TimeMarker { func ScanAllMarker() *TimeMarker {
once.Do(func() { once.Do(func() {
a := os.Getenv("HARBOR_SCAN_ALL_INTERVAL") a := os.Getenv("HARBOR_SCAN_ALL_INTERVAL")
@ -74,7 +74,7 @@ func ScanAllMarker() *TimeMarker {
return scanAllMarker return scanAllMarker
} }
//ScanOverviewMarker ... // ScanOverviewMarker ...
func ScanOverviewMarker() *TimeMarker { func ScanOverviewMarker() *TimeMarker {
return scanOverviewMarker return scanOverviewMarker
} }
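A sketch of the guard pattern the marker enables; scanAll stands in for whatever action is being rate-limited.

func maybeScanAll(scanAll func()) {
    m := ScanAllMarker()

    // Skip the action if the previously marked time has not passed yet.
    if !m.Check() {
        return
    }

    // Mark pushes the next allowed time one interval into the future.
    m.Mark()
    scanAll()
}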

View File

@ -31,13 +31,13 @@ import (
) )
const ( const (
//TokenURLSuffix ... // TokenURLSuffix ...
TokenURLSuffix = "/oauth/token" TokenURLSuffix = "/oauth/token"
//AuthURLSuffix ... // AuthURLSuffix ...
AuthURLSuffix = "/oauth/authorize" AuthURLSuffix = "/oauth/authorize"
//UserInfoURLSuffix ... // UserInfoURLSuffix ...
UserInfoURLSuffix = "/userinfo" UserInfoURLSuffix = "/userinfo"
//UsersURLSuffix ... // UsersURLSuffix ...
UsersURLSuffix = "/Users" UsersURLSuffix = "/Users"
) )
@ -45,13 +45,13 @@ var uaaTransport = &http.Transport{}
// Client provides funcs to interact with UAA. // Client provides funcs to interact with UAA.
type Client interface { type Client interface {
//PasswordAuth accepts a username and password, and returns a token if they are valid. // PasswordAuth accepts a username and password, and returns a token if they are valid.
PasswordAuth(username, password string) (*oauth2.Token, error) PasswordAuth(username, password string) (*oauth2.Token, error)
//GetUserInfo sends the token to the OIDC endpoint to get user info; currently it is also used to validate the token. // GetUserInfo sends the token to the OIDC endpoint to get user info; currently it is also used to validate the token.
GetUserInfo(token string) (*UserInfo, error) GetUserInfo(token string) (*UserInfo, error)
//SearchUser searches a user based on user name. // SearchUser searches a user based on user name.
SearchUser(name string) ([]*SearchUserEntry, error) SearchUser(name string) ([]*SearchUserEntry, error)
//UpdateConfig updates the config of the current client // UpdateConfig updates the config of the current client
UpdateConfig(cfg *ClientConfig) error UpdateConfig(cfg *ClientConfig) error
} }
@ -61,7 +61,7 @@ type ClientConfig struct {
ClientSecret string ClientSecret string
Endpoint string Endpoint string
SkipTLSVerify bool SkipTLSVerify bool
//Absolute path of the CA root used to communicate with UAA, only effective when SkipTLSVerify is set to false. // Absolute path of the CA root used to communicate with UAA, only effective when SkipTLSVerify is set to false.
CARootPath string CARootPath string
} }
@ -76,13 +76,13 @@ type UserInfo struct {
Email string `json:"email"` Email string `json:"email"`
} }
//SearchUserEmailEntry ... // SearchUserEmailEntry ...
type SearchUserEmailEntry struct { type SearchUserEmailEntry struct {
Value string `json:"value"` Value string `json:"value"`
Primary bool `json:"primary"` Primary bool `json:"primary"`
} }
//SearchUserEntry is the struct of an entry of user within search result. // SearchUserEntry is the struct of an entry of user within search result.
type SearchUserEntry struct { type SearchUserEntry struct {
ID string `json:"id"` ID string `json:"id"`
ExtID string `json:"externalId"` ExtID string `json:"externalId"`
@ -91,7 +91,7 @@ type SearchUserEntry struct {
Groups []interface{} Groups []interface{}
} }
//SearchUserRes is the struct to parse the result of search user API of UAA // SearchUserRes is the struct to parse the result of search user API of UAA
type SearchUserRes struct { type SearchUserRes struct {
Resources []*SearchUserEntry `json:"resources"` Resources []*SearchUserEntry `json:"resources"`
TotalResults int `json:"totalResults"` TotalResults int `json:"totalResults"`
@ -104,7 +104,7 @@ type defaultClient struct {
oauth2Cfg *oauth2.Config oauth2Cfg *oauth2.Config
twoLegCfg *clientcredentials.Config twoLegCfg *clientcredentials.Config
endpoint string endpoint string
//TODO: add public key, etc... // TODO: add public key, etc...
} }
func (dc *defaultClient) PasswordAuth(username, password string) (*oauth2.Token, error) { func (dc *defaultClient) PasswordAuth(username, password string) (*oauth2.Token, error) {
@ -190,7 +190,7 @@ func (dc *defaultClient) UpdateConfig(cfg *ClientConfig) error {
return err return err
} }
pool := x509.NewCertPool() pool := x509.NewCertPool()
//Do not return an error if the certificate is malformed, so we can put a placeholder. // Do not return an error if the certificate is malformed, so we can put a placeholder.
if ok := pool.AppendCertsFromPEM(content); !ok { if ok := pool.AppendCertsFromPEM(content); !ok {
log.Warningf("Failed to append certificate to cert pool, cert path: %s", cfg.CARootPath) log.Warningf("Failed to append certificate to cert pool, cert path: %s", cfg.CARootPath)
} else { } else {
@ -202,7 +202,7 @@ func (dc *defaultClient) UpdateConfig(cfg *ClientConfig) error {
} }
uaaTransport.TLSClientConfig = tc uaaTransport.TLSClientConfig = tc
dc.httpClient.Transport = uaaTransport dc.httpClient.Transport = uaaTransport
//dc.httpClient.Transport = transport. // dc.httpClient.Transport = transport.
oc := &oauth2.Config{ oc := &oauth2.Config{
ClientID: cfg.ClientID, ClientID: cfg.ClientID,

View File

@ -99,7 +99,7 @@ func TestNewClientWithCACert(t *testing.T) {
} }
_, err := NewDefaultClient(cfg) _, err := NewDefaultClient(cfg)
assert.Nil(err) assert.Nil(err)
//Skip if it's malformed. // Skip if it's malformed.
cfg.CARootPath = path.Join(currPath(), "test", "non-ca.pem") cfg.CARootPath = path.Join(currPath(), "test", "non-ca.pem")
_, err = NewDefaultClient(cfg) _, err = NewDefaultClient(cfg)
assert.Nil(err) assert.Nil(err)

View File

@ -128,7 +128,7 @@ func ParseTimeStamp(timestamp string) (*time.Time, error) {
return &t, nil return &t, nil
} }
//ConvertMapToStruct is used to fill the specified struct with map. // ConvertMapToStruct is used to fill the specified struct with map.
func ConvertMapToStruct(object interface{}, values interface{}) error { func ConvertMapToStruct(object interface{}, values interface{}) error {
if object == nil { if object == nil {
return errors.New("nil struct is not supported") return errors.New("nil struct is not supported")
@ -168,7 +168,7 @@ func ParseProjectIDOrName(value interface{}) (int64, string, error) {
return id, name, nil return id, name, nil
} }
//SafeCastString -- cast an object to string safely // SafeCastString -- cast an object to string safely
func SafeCastString(value interface{}) string { func SafeCastString(value interface{}) string {
if result, ok := value.(string); ok { if result, ok := value.(string); ok {
return result return result
@ -176,7 +176,7 @@ func SafeCastString(value interface{}) string {
return "" return ""
} }
//SafeCastInt -- // SafeCastInt --
func SafeCastInt(value interface{}) int { func SafeCastInt(value interface{}) int {
if result, ok := value.(int); ok { if result, ok := value.(int); ok {
return result return result
@ -184,7 +184,7 @@ func SafeCastInt(value interface{}) int {
return 0 return 0
} }
//SafeCastBool -- // SafeCastBool --
func SafeCastBool(value interface{}) bool { func SafeCastBool(value interface{}) bool {
if result, ok := value.(bool); ok { if result, ok := value.(bool); ok {
return result return result
@ -192,7 +192,7 @@ func SafeCastBool(value interface{}) bool {
return false return false
} }
//SafeCastFloat64 -- // SafeCastFloat64 --
func SafeCastFloat64(value interface{}) float64 { func SafeCastFloat64(value interface{}) float64 {
if result, ok := value.(float64); ok { if result, ok := value.(float64); ok {
return result return result
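A sketch of what the safe-cast helpers are for: pulling typed values out of a generic map without panicking; the map keys are placeholders.

func readSettings(cfg map[string]interface{}) (string, int, bool, float64) {
    // Each helper returns the zero value when the underlying type does not match.
    host := SafeCastString(cfg["host"])
    port := SafeCastInt(cfg["port"])
    useTLS := SafeCastBool(cfg["tls"])
    ratio := SafeCastFloat64(cfg["ratio"])
    return host, port, useTLS, ratio
}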

View File

@ -121,7 +121,7 @@ func TestReversibleEncrypt(t *testing.T) {
if decrypted != password { if decrypted != password {
t.Errorf("decrypted password: %s, is not identical to original", decrypted) t.Errorf("decrypted password: %s, is not identical to original", decrypted)
} }
//Test b64 for backward compatibility // Test b64 for backward compatibility
b64password := base64.StdEncoding.EncodeToString([]byte(password)) b64password := base64.StdEncoding.EncodeToString([]byte(password))
decrypted, err = ReversibleDecrypt(b64password, key) decrypted, err = ReversibleDecrypt(b64password, key)
if err != nil { if err != nil {

View File

@ -17,22 +17,22 @@ const (
authHeader = "Authorization" authHeader = "Authorization"
) )
//Authenticator defines the behaviors of auth checking. // Authenticator defines the behaviors of auth checking.
type Authenticator interface { type Authenticator interface {
//Auth incoming request // Auth incoming request
// //
//req *http.Request: the incoming request // req *http.Request: the incoming request
// //
//Returns: // Returns:
// nil returned if successfully done // nil returned if successfully done
// otherwise an error returned // otherwise an error returned
DoAuth(req *http.Request) error DoAuth(req *http.Request) error
} }
//SecretAuthenticator implements interface 'Authenticator' based on simple secret. // SecretAuthenticator implements interface 'Authenticator' based on simple secret.
type SecretAuthenticator struct{} type SecretAuthenticator struct{}
//DoAuth implements same method in interface 'Authenticator'. // DoAuth implements same method in interface 'Authenticator'.
func (sa *SecretAuthenticator) DoAuth(req *http.Request) error { func (sa *SecretAuthenticator) DoAuth(req *http.Request) error {
if req == nil { if req == nil {
return errors.New("nil request") return errors.New("nil request")
@ -48,7 +48,7 @@ func (sa *SecretAuthenticator) DoAuth(req *http.Request) error {
} }
secret := strings.TrimSpace(strings.TrimPrefix(h, secretPrefix)) secret := strings.TrimSpace(strings.TrimPrefix(h, secretPrefix))
//in case both are empty // in case both are empty
if utils.IsEmptyStr(secret) { if utils.IsEmptyStr(secret) {
return errors.New("empty secret is not allowed") return errors.New("empty secret is not allowed")
} }
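A sketch of invoking the authenticator directly; in practice the router below calls it before dispatching, so this is only for illustration.

func checkRequest(req *http.Request) error {
    var auth Authenticator = &SecretAuthenticator{}

    // DoAuth returns nil only when the Authorization header carries a valid secret.
    return auth.DoAuth(req)
}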

View File

@ -18,37 +18,37 @@ import (
"github.com/goharbor/harbor/src/jobservice/opm" "github.com/goharbor/harbor/src/jobservice/opm"
) )
//Handler defines approaches to handle the http requests. // Handler defines approaches to handle the http requests.
type Handler interface { type Handler interface {
//HandleLaunchJobReq is used to handle the job submission request. // HandleLaunchJobReq is used to handle the job submission request.
HandleLaunchJobReq(w http.ResponseWriter, req *http.Request) HandleLaunchJobReq(w http.ResponseWriter, req *http.Request)
//HandleGetJobReq is used to handle the job stats query request. // HandleGetJobReq is used to handle the job stats query request.
HandleGetJobReq(w http.ResponseWriter, req *http.Request) HandleGetJobReq(w http.ResponseWriter, req *http.Request)
//HandleJobActionReq is used to handle the job action requests (stop/retry). // HandleJobActionReq is used to handle the job action requests (stop/retry).
HandleJobActionReq(w http.ResponseWriter, req *http.Request) HandleJobActionReq(w http.ResponseWriter, req *http.Request)
//HandleCheckStatusReq is used to handle the job service healthy status checking request. // HandleCheckStatusReq is used to handle the job service healthy status checking request.
HandleCheckStatusReq(w http.ResponseWriter, req *http.Request) HandleCheckStatusReq(w http.ResponseWriter, req *http.Request)
//HandleJobLogReq is used to handle the request of getting job logs // HandleJobLogReq is used to handle the request of getting job logs
HandleJobLogReq(w http.ResponseWriter, req *http.Request) HandleJobLogReq(w http.ResponseWriter, req *http.Request)
} }
//DefaultHandler is the default request handler which implements the Handler interface. // DefaultHandler is the default request handler which implements the Handler interface.
type DefaultHandler struct { type DefaultHandler struct {
controller core.Interface controller core.Interface
} }
//NewDefaultHandler is constructor of DefaultHandler. // NewDefaultHandler is constructor of DefaultHandler.
func NewDefaultHandler(ctl core.Interface) *DefaultHandler { func NewDefaultHandler(ctl core.Interface) *DefaultHandler {
return &DefaultHandler{ return &DefaultHandler{
controller: ctl, controller: ctl,
} }
} }
//HandleLaunchJobReq is implementation of method defined in interface 'Handler' // HandleLaunchJobReq is implementation of method defined in interface 'Handler'
func (dh *DefaultHandler) HandleLaunchJobReq(w http.ResponseWriter, req *http.Request) { func (dh *DefaultHandler) HandleLaunchJobReq(w http.ResponseWriter, req *http.Request) {
if !dh.preCheck(w) { if !dh.preCheck(w) {
return return
@ -60,14 +60,14 @@ func (dh *DefaultHandler) HandleLaunchJobReq(w http.ResponseWriter, req *http.Re
return return
} }
//unmarshal data // unmarshal data
jobReq := models.JobRequest{} jobReq := models.JobRequest{}
if err = json.Unmarshal(data, &jobReq); err != nil { if err = json.Unmarshal(data, &jobReq); err != nil {
dh.handleError(w, http.StatusInternalServerError, errs.HandleJSONDataError(err)) dh.handleError(w, http.StatusInternalServerError, errs.HandleJSONDataError(err))
return return
} }
//Pass request to the controller for the follow-up. // Pass request to the controller for the follow-up.
jobStats, err := dh.controller.LaunchJob(jobReq) jobStats, err := dh.controller.LaunchJob(jobReq)
if err != nil { if err != nil {
dh.handleError(w, http.StatusInternalServerError, errs.LaunchJobError(err)) dh.handleError(w, http.StatusInternalServerError, errs.LaunchJobError(err))
@ -83,7 +83,7 @@ func (dh *DefaultHandler) HandleLaunchJobReq(w http.ResponseWriter, req *http.Re
w.Write(data) w.Write(data)
} }
//HandleGetJobReq is implementation of method defined in interface 'Handler' // HandleGetJobReq is implementation of method defined in interface 'Handler'
func (dh *DefaultHandler) HandleGetJobReq(w http.ResponseWriter, req *http.Request) { func (dh *DefaultHandler) HandleGetJobReq(w http.ResponseWriter, req *http.Request) {
if !dh.preCheck(w) { if !dh.preCheck(w) {
return return
@ -113,7 +113,7 @@ func (dh *DefaultHandler) HandleGetJobReq(w http.ResponseWriter, req *http.Reque
w.Write(data) w.Write(data)
} }
//HandleJobActionReq is implementation of method defined in interface 'Handler' // HandleJobActionReq is implementation of method defined in interface 'Handler'
func (dh *DefaultHandler) HandleJobActionReq(w http.ResponseWriter, req *http.Request) { func (dh *DefaultHandler) HandleJobActionReq(w http.ResponseWriter, req *http.Request) {
if !dh.preCheck(w) { if !dh.preCheck(w) {
return return
@ -128,7 +128,7 @@ func (dh *DefaultHandler) HandleJobActionReq(w http.ResponseWriter, req *http.Re
return return
} }
//unmarshal data // unmarshal data
jobActionReq := models.JobActionRequest{} jobActionReq := models.JobActionRequest{}
if err = json.Unmarshal(data, &jobActionReq); err != nil { if err = json.Unmarshal(data, &jobActionReq); err != nil {
dh.handleError(w, http.StatusInternalServerError, errs.HandleJSONDataError(err)) dh.handleError(w, http.StatusInternalServerError, errs.HandleJSONDataError(err))
@ -174,10 +174,10 @@ func (dh *DefaultHandler) HandleJobActionReq(w http.ResponseWriter, req *http.Re
return return
} }
w.WriteHeader(http.StatusNoContent) //only header, no content returned w.WriteHeader(http.StatusNoContent) // only header, no content returned
} }
//HandleCheckStatusReq is implementation of method defined in interface 'Handler' // HandleCheckStatusReq is implementation of method defined in interface 'Handler'
func (dh *DefaultHandler) HandleCheckStatusReq(w http.ResponseWriter, req *http.Request) { func (dh *DefaultHandler) HandleCheckStatusReq(w http.ResponseWriter, req *http.Request) {
if !dh.preCheck(w) { if !dh.preCheck(w) {
return return
@ -198,7 +198,7 @@ func (dh *DefaultHandler) HandleCheckStatusReq(w http.ResponseWriter, req *http.
w.Write(data) w.Write(data)
} }
//HandleJobLogReq is implementation of method defined in interface 'Handler' // HandleJobLogReq is implementation of method defined in interface 'Handler'
func (dh *DefaultHandler) HandleJobLogReq(w http.ResponseWriter, req *http.Request) { func (dh *DefaultHandler) HandleJobLogReq(w http.ResponseWriter, req *http.Request) {
if !dh.preCheck(w) { if !dh.preCheck(w) {
return return

View File

@ -15,26 +15,26 @@ const (
apiVersion = "v1" apiVersion = "v1"
) )
//Router defines the related routes for the job service and directs the request // Router defines the related routes for the job service and directs the request
//to the right handler method. // to the right handler method.
type Router interface { type Router interface {
//ServeHTTP used to handle the http requests // ServeHTTP used to handle the http requests
ServeHTTP(w http.ResponseWriter, req *http.Request) ServeHTTP(w http.ResponseWriter, req *http.Request)
} }
//BaseRouter provides the basic routes for the job service based on the golang http server mux. // BaseRouter provides the basic routes for the job service based on the golang http server mux.
type BaseRouter struct { type BaseRouter struct {
//Use mux to keep the routes mapping. // Use mux to keep the routes mapping.
router *mux.Router router *mux.Router
//Handler used to handle the requests // Handler used to handle the requests
handler Handler handler Handler
//Do auth // Do auth
authenticator Authenticator authenticator Authenticator
} }
//NewBaseRouter is the constructor of BaseRouter. // NewBaseRouter is the constructor of BaseRouter.
func NewBaseRouter(handler Handler, authenticator Authenticator) Router { func NewBaseRouter(handler Handler, authenticator Authenticator) Router {
br := &BaseRouter{ br := &BaseRouter{
router: mux.NewRouter(), router: mux.NewRouter(),
@ -42,15 +42,15 @@ func NewBaseRouter(handler Handler, authenticator Authenticator) Router {
authenticator: authenticator, authenticator: authenticator,
} }
//Register routes here // Register routes here
br.registerRoutes() br.registerRoutes()
return br return br
} }
//ServeHTTP is the implementation of Router interface. // ServeHTTP is the implementation of Router interface.
func (br *BaseRouter) ServeHTTP(w http.ResponseWriter, req *http.Request) { func (br *BaseRouter) ServeHTTP(w http.ResponseWriter, req *http.Request) {
//Do auth // Do auth
if err := br.authenticator.DoAuth(req); err != nil { if err := br.authenticator.DoAuth(req); err != nil {
authErr := errs.UnauthorizedError(err) authErr := errs.UnauthorizedError(err)
w.WriteHeader(http.StatusUnauthorized) w.WriteHeader(http.StatusUnauthorized)
@ -58,11 +58,11 @@ func (br *BaseRouter) ServeHTTP(w http.ResponseWriter, req *http.Request) {
return return
} }
//Directly pass requests to the server mux. // Directly pass requests to the server mux.
br.router.ServeHTTP(w, req) br.router.ServeHTTP(w, req)
} }
//registerRoutes adds routes to the server mux. // registerRoutes adds routes to the server mux.
func (br *BaseRouter) registerRoutes() { func (br *BaseRouter) registerRoutes() {
subRouter := br.router.PathPrefix(fmt.Sprintf("%s/%s", baseRoute, apiVersion)).Subrouter() subRouter := br.router.PathPrefix(fmt.Sprintf("%s/%s", baseRoute, apiVersion)).Subrouter()
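A sketch of assembling the router from the pieces in this package, assuming a core.Interface implementation is supplied by the caller.

func newAPIRouter(ctl core.Interface) Router {
    // The default handler delegates the actual work to the controller.
    handler := NewDefaultHandler(ctl)

    // Every request is authenticated against the shared secret before routing.
    return NewBaseRouter(handler, &SecretAuthenticator{})
}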

View File

@ -14,37 +14,37 @@ import (
"github.com/goharbor/harbor/src/jobservice/logger" "github.com/goharbor/harbor/src/jobservice/logger"
) )
//Server serves the http requests. // Server serves the http requests.
type Server struct { type Server struct {
//The real backend http server to serve the requests // The real backend http server to serve the requests
httpServer *http.Server httpServer *http.Server
//Define the routes of http service // Define the routes of http service
router Router router Router
//Keep the configurations of server // Keep the configurations of server
config ServerConfig config ServerConfig
//The context // The context
context *env.Context context *env.Context
} }
//ServerConfig contains the configurations of Server. // ServerConfig contains the configurations of Server.
type ServerConfig struct { type ServerConfig struct {
//Protocol server listening on: https/http // Protocol server listening on: https/http
Protocol string Protocol string
//Server listening port // Server listening port
Port uint Port uint
//Cert file path if using https // Cert file path if using https
Cert string Cert string
//Key file path if using https // Key file path if using https
Key string Key string
} }
//NewServer is constructor of Server. // NewServer is constructor of Server.
func NewServer(ctx *env.Context, router Router, cfg ServerConfig) *Server { func NewServer(ctx *env.Context, router Router, cfg ServerConfig) *Server {
apiServer := &Server{ apiServer := &Server{
router: router, router: router,
@ -60,7 +60,7 @@ func NewServer(ctx *env.Context, router Router, cfg ServerConfig) *Server {
IdleTimeout: 60 * time.Second, IdleTimeout: 60 * time.Second,
} }
//Initialize TLS/SSL config if protocol is https // Initialize TLS/SSL config if protocol is https
if cfg.Protocol == config.JobServiceProtocolHTTPS { if cfg.Protocol == config.JobServiceProtocolHTTPS {
tlsCfg := &tls.Config{ tlsCfg := &tls.Config{
MinVersion: tls.VersionTLS12, MinVersion: tls.VersionTLS12,
@ -83,7 +83,7 @@ func NewServer(ctx *env.Context, router Router, cfg ServerConfig) *Server {
return apiServer return apiServer
} }
//Start the server to serve requests. // Start the server to serve requests.
func (s *Server) Start() { func (s *Server) Start() {
s.context.WG.Add(1) s.context.WG.Add(1)
@ -106,7 +106,7 @@ func (s *Server) Start() {
}() }()
} }
//Stop server gracefully. // Stop server gracefully.
func (s *Server) Stop() { func (s *Server) Stop() {
go func() { go func() {
defer func() { defer func() {
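A sketch of starting the API server over plain HTTP; the port is arbitrary and the env.Context is assumed to be prepared by the bootstrap code.

func startAPIServer(ctx *env.Context, router Router) *Server {
    cfg := ServerConfig{
        Protocol: config.JobServiceProtocolHTTP,
        Port:     8081,
        // Cert and Key are only needed when Protocol is https.
    }

    server := NewServer(ctx, router, cfg)
    server.Start()
    return server
}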

View File

@ -1,6 +1,6 @@
// Copyright 2018 The Harbor Authors. All rights reserved. // Copyright 2018 The Harbor Authors. All rights reserved.
//Package config provides functions to handle the configurations of job service. // Package config provides functions to handle the configurations of job service.
package config package config
import ( import (
@ -30,81 +30,81 @@ const (
jobServiceAdminServerEndpoint = "ADMINSERVER_URL" jobServiceAdminServerEndpoint = "ADMINSERVER_URL"
jobServiceAuthSecret = "JOBSERVICE_SECRET" jobServiceAuthSecret = "JOBSERVICE_SECRET"
//JobServiceProtocolHTTPS points to the 'https' protocol // JobServiceProtocolHTTPS points to the 'https' protocol
JobServiceProtocolHTTPS = "https" JobServiceProtocolHTTPS = "https"
//JobServiceProtocolHTTP points to the 'http' protocol // JobServiceProtocolHTTP points to the 'http' protocol
JobServiceProtocolHTTP = "http" JobServiceProtocolHTTP = "http"
//JobServicePoolBackendRedis represents redis backend // JobServicePoolBackendRedis represents redis backend
JobServicePoolBackendRedis = "redis" JobServicePoolBackendRedis = "redis"
//secret of UI // secret of UI
uiAuthSecret = "UI_SECRET" uiAuthSecret = "UI_SECRET"
//redis protocol schema // redis protocol schema
redisSchema = "redis://" redisSchema = "redis://"
) )
//DefaultConfig is the default configuration reference // DefaultConfig is the default configuration reference
var DefaultConfig = &Configuration{} var DefaultConfig = &Configuration{}
//Configuration loads and keeps the related configuration items of job service. // Configuration loads and keeps the related configuration items of job service.
type Configuration struct { type Configuration struct {
//Protocol server listening on: https/http // Protocol server listening on: https/http
Protocol string `yaml:"protocol"` Protocol string `yaml:"protocol"`
//Server listening port // Server listening port
Port uint `yaml:"port"` Port uint `yaml:"port"`
AdminServer string `yaml:"admin_server"` AdminServer string `yaml:"admin_server"`
//Additional config when using https // Additional config when using https
HTTPSConfig *HTTPSConfig `yaml:"https_config,omitempty"` HTTPSConfig *HTTPSConfig `yaml:"https_config,omitempty"`
//Configurations of worker pool // Configurations of worker pool
PoolConfig *PoolConfig `yaml:"worker_pool,omitempty"` PoolConfig *PoolConfig `yaml:"worker_pool,omitempty"`
//Logger configurations // Logger configurations
LoggerConfig *LoggerConfig `yaml:"logger,omitempty"` LoggerConfig *LoggerConfig `yaml:"logger,omitempty"`
} }
//HTTPSConfig keeps additional configurations when using https protocol // HTTPSConfig keeps additional configurations when using https protocol
type HTTPSConfig struct { type HTTPSConfig struct {
Cert string `yaml:"cert"` Cert string `yaml:"cert"`
Key string `yaml:"key"` Key string `yaml:"key"`
} }
//RedisPoolConfig keeps redis pool info. // RedisPoolConfig keeps redis pool info.
type RedisPoolConfig struct { type RedisPoolConfig struct {
RedisURL string `yaml:"redis_url"` RedisURL string `yaml:"redis_url"`
Namespace string `yaml:"namespace"` Namespace string `yaml:"namespace"`
} }
//PoolConfig keeps worker pool configurations. // PoolConfig keeps worker pool configurations.
type PoolConfig struct { type PoolConfig struct {
//Worker concurrency // Worker concurrency
WorkerCount uint `yaml:"workers"` WorkerCount uint `yaml:"workers"`
Backend string `yaml:"backend"` Backend string `yaml:"backend"`
RedisPoolCfg *RedisPoolConfig `yaml:"redis_pool,omitempty"` RedisPoolCfg *RedisPoolConfig `yaml:"redis_pool,omitempty"`
} }
//LoggerConfig keeps logger configurations. // LoggerConfig keeps logger configurations.
type LoggerConfig struct { type LoggerConfig struct {
BasePath string `yaml:"path"` BasePath string `yaml:"path"`
LogLevel string `yaml:"level"` LogLevel string `yaml:"level"`
ArchivePeriod uint `yaml:"archive_period"` ArchivePeriod uint `yaml:"archive_period"`
} }
//Load the configuration options from the specified yaml file. // Load the configuration options from the specified yaml file.
//If the yaml file is specified and exists, load configurations from the yaml file first; // If the yaml file is specified and exists, load configurations from the yaml file first;
//If detecting env variables is enabled, load configurations from env variables; // If detecting env variables is enabled, load configurations from env variables;
//Please note that a detected env variable overrides the same configuration item loaded from file. // Please note that a detected env variable overrides the same configuration item loaded from file.
// //
//yamlFilePath string: The path of the config yaml file // yamlFilePath string: The path of the config yaml file
//detectEnv bool: Whether to detect the environment variables or not // detectEnv bool: Whether to detect the environment variables or not
func (c *Configuration) Load(yamlFilePath string, detectEnv bool) error { func (c *Configuration) Load(yamlFilePath string, detectEnv bool) error {
if !utils.IsEmptyStr(yamlFilePath) { if !utils.IsEmptyStr(yamlFilePath) {
//Try to load from file first // Try to load from file first
data, err := ioutil.ReadFile(yamlFilePath) data, err := ioutil.ReadFile(yamlFilePath)
if err != nil { if err != nil {
return err return err
@ -115,11 +115,11 @@ func (c *Configuration) Load(yamlFilePath string, detectEnv bool) error {
} }
if detectEnv { if detectEnv {
//Load from env variables // Load from env variables
c.loadEnvs() c.loadEnvs()
} }
//translate redis url if needed // translate redis url if needed
if c.PoolConfig != nil && c.PoolConfig.RedisPoolCfg != nil { if c.PoolConfig != nil && c.PoolConfig.RedisPoolCfg != nil {
redisAddress := c.PoolConfig.RedisPoolCfg.RedisURL redisAddress := c.PoolConfig.RedisPoolCfg.RedisURL
if !utils.IsEmptyStr(redisAddress) { if !utils.IsEmptyStr(redisAddress) {
@ -135,11 +135,11 @@ func (c *Configuration) Load(yamlFilePath string, detectEnv bool) error {
} }
} }
//Validate settings // Validate settings
return c.validate() return c.validate()
} }
//GetLogBasePath returns the log base path config // GetLogBasePath returns the log base path config
func GetLogBasePath() string { func GetLogBasePath() string {
if DefaultConfig.LoggerConfig != nil { if DefaultConfig.LoggerConfig != nil {
return DefaultConfig.LoggerConfig.BasePath return DefaultConfig.LoggerConfig.BasePath
@ -148,7 +148,7 @@ func GetLogBasePath() string {
return "" return ""
} }
//GetLogLevel returns the log level // GetLogLevel returns the log level
func GetLogLevel() string { func GetLogLevel() string {
if DefaultConfig.LoggerConfig != nil { if DefaultConfig.LoggerConfig != nil {
return DefaultConfig.LoggerConfig.LogLevel return DefaultConfig.LoggerConfig.LogLevel
@ -157,31 +157,31 @@ func GetLogLevel() string {
return "" return ""
} }
//GetLogArchivePeriod returns the archive period // GetLogArchivePeriod returns the archive period
func GetLogArchivePeriod() uint { func GetLogArchivePeriod() uint {
if DefaultConfig.LoggerConfig != nil { if DefaultConfig.LoggerConfig != nil {
return DefaultConfig.LoggerConfig.ArchivePeriod return DefaultConfig.LoggerConfig.ArchivePeriod
} }
return 1 //return default return 1 // return default
} }
//GetAuthSecret get the auth secret from the env // GetAuthSecret get the auth secret from the env
func GetAuthSecret() string { func GetAuthSecret() string {
return utils.ReadEnv(jobServiceAuthSecret) return utils.ReadEnv(jobServiceAuthSecret)
} }
//GetUIAuthSecret get the auth secret of UI side // GetUIAuthSecret get the auth secret of UI side
func GetUIAuthSecret() string { func GetUIAuthSecret() string {
return utils.ReadEnv(uiAuthSecret) return utils.ReadEnv(uiAuthSecret)
} }
//GetAdminServerEndpoint returns the admin server endpoint // GetAdminServerEndpoint returns the admin server endpoint
func GetAdminServerEndpoint() string { func GetAdminServerEndpoint() string {
return DefaultConfig.AdminServer return DefaultConfig.AdminServer
} }
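A small sketch of how these getters are typically combined to derive a per-job log file path, mirroring the pattern the job context implementation later in this diff uses when it initializes its logger; imports are assumed:

// jobLogPath joins the configured log base path with a job ID,
// producing one log file per job.
func jobLogPath(jobID string) string {
	return fmt.Sprintf("%s/%s.log", config.GetLogBasePath(), jobID)
}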
//Load env variables // Load env variables
func (c *Configuration) loadEnvs() { func (c *Configuration) loadEnvs() {
prot := utils.ReadEnv(jobServiceProtocol) prot := utils.ReadEnv(jobServiceProtocol)
if !utils.IsEmptyStr(prot) { if !utils.IsEmptyStr(prot) {
@ -195,7 +195,7 @@ func (c *Configuration) loadEnvs() {
} }
} }
//Only when protocol is https // Only when protocol is https
if c.Protocol == JobServiceProtocolHTTPS { if c.Protocol == JobServiceProtocolHTTPS {
cert := utils.ReadEnv(jobServiceHTTPCert) cert := utils.ReadEnv(jobServiceHTTPCert)
if !utils.IsEmptyStr(cert) { if !utils.IsEmptyStr(cert) {
@ -256,7 +256,7 @@ func (c *Configuration) loadEnvs() {
} }
} }
//logger // logger
loggerPath := utils.ReadEnv(jobServiceLoggerBasePath) loggerPath := utils.ReadEnv(jobServiceLoggerBasePath)
if !utils.IsEmptyStr(loggerPath) { if !utils.IsEmptyStr(loggerPath) {
if c.LoggerConfig == nil { if c.LoggerConfig == nil {
@ -281,14 +281,14 @@ func (c *Configuration) loadEnvs() {
} }
} }
//admin server // admin server
if adminServer := utils.ReadEnv(jobServiceAdminServerEndpoint); !utils.IsEmptyStr(adminServer) { if adminServer := utils.ReadEnv(jobServiceAdminServerEndpoint); !utils.IsEmptyStr(adminServer) {
c.AdminServer = adminServer c.AdminServer = adminServer
} }
} }
//Check if the configurations are valid settings. // Check if the configurations are valid settings.
func (c *Configuration) validate() error { func (c *Configuration) validate() error {
if c.Protocol != JobServiceProtocolHTTPS && if c.Protocol != JobServiceProtocolHTTPS &&
c.Protocol != JobServiceProtocolHTTP { c.Protocol != JobServiceProtocolHTTP {
@ -323,7 +323,7 @@ func (c *Configuration) validate() error {
return fmt.Errorf("worker pool backend %s does not support", c.PoolConfig.Backend) return fmt.Errorf("worker pool backend %s does not support", c.PoolConfig.Backend)
} }
//When backend is redis // When backend is redis
if c.PoolConfig.Backend == JobServicePoolBackendRedis { if c.PoolConfig.Backend == JobServicePoolBackendRedis {
if c.PoolConfig.RedisPoolCfg == nil { if c.PoolConfig.RedisPoolCfg == nil {
return fmt.Errorf("redis pool must be configured when backend is set to '%s'", c.PoolConfig.Backend) return fmt.Errorf("redis pool must be configured when backend is set to '%s'", c.PoolConfig.Backend)
@ -366,5 +366,5 @@ func (c *Configuration) validate() error {
return fmt.Errorf("invalid admin server endpoint: %s", err) return fmt.Errorf("invalid admin server endpoint: %s", err)
} }
return nil //valid return nil // valid
} }
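As a rough illustration of the redis-backend branch of validate, a hedged sketch of the minimal fields it checks; the struct type names (PoolConfig, RedisPoolConfig) are assumptions inferred from the field accesses above, and real deployments set more fields (port, worker count, logger, admin server) than shown here:

cfg := &Configuration{
	Protocol: JobServiceProtocolHTTP, // or JobServiceProtocolHTTPS plus the cert/key settings
	PoolConfig: &PoolConfig{
		Backend: JobServicePoolBackendRedis,
		RedisPoolCfg: &RedisPoolConfig{
			RedisURL: "redis://localhost:6379/2", // illustrative address
		},
	},
}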

View File

@ -21,38 +21,38 @@ const (
hookDeactivated = "error" hookDeactivated = "error"
) )
//Controller implements the core interface and provides the related job handling methods. // Controller implements the core interface and provides the related job handling methods.
//Controller coordinates the lower components to complete the process, acting as a commander. // Controller coordinates the lower components to complete the process, acting as a commander.
type Controller struct { type Controller struct {
//Refer the backend pool // Refer the backend pool
backendPool pool.Interface backendPool pool.Interface
} }
//NewController is constructor of Controller. // NewController is constructor of Controller.
func NewController(backendPool pool.Interface) *Controller { func NewController(backendPool pool.Interface) *Controller {
return &Controller{ return &Controller{
backendPool: backendPool, backendPool: backendPool,
} }
} }
//LaunchJob is implementation of same method in core interface. // LaunchJob is implementation of same method in core interface.
func (c *Controller) LaunchJob(req models.JobRequest) (models.JobStats, error) { func (c *Controller) LaunchJob(req models.JobRequest) (models.JobStats, error) {
if err := validJobReq(req); err != nil { if err := validJobReq(req); err != nil {
return models.JobStats{}, err return models.JobStats{}, err
} }
//Validate job name // Validate job name
jobType, isKnownJob := c.backendPool.IsKnownJob(req.Job.Name) jobType, isKnownJob := c.backendPool.IsKnownJob(req.Job.Name)
if !isKnownJob { if !isKnownJob {
return models.JobStats{}, fmt.Errorf("job with name '%s' is unknown", req.Job.Name) return models.JobStats{}, fmt.Errorf("job with name '%s' is unknown", req.Job.Name)
} }
//Validate parameters // Validate parameters
if err := c.backendPool.ValidateJobParameters(jobType, req.Job.Parameters); err != nil { if err := c.backendPool.ValidateJobParameters(jobType, req.Job.Parameters); err != nil {
return models.JobStats{}, err return models.JobStats{}, err
} }
//Enqueue the job according to its kind // Enqueue the job according to its kind
var ( var (
res models.JobStats res models.JobStats
err error err error
@ -73,7 +73,7 @@ func (c *Controller) LaunchJob(req models.JobRequest) (models.JobStats, error) {
res, err = c.backendPool.Enqueue(req.Job.Name, req.Job.Parameters, req.Job.Metadata.IsUnique) res, err = c.backendPool.Enqueue(req.Job.Name, req.Job.Parameters, req.Job.Metadata.IsUnique)
} }
//Register status hook? // Register status hook?
if err == nil { if err == nil {
if !utils.IsEmptyStr(req.Job.StatusHook) { if !utils.IsEmptyStr(req.Job.StatusHook) {
if err := c.backendPool.RegisterHook(res.Stats.JobID, req.Job.StatusHook); err != nil { if err := c.backendPool.RegisterHook(res.Stats.JobID, req.Job.StatusHook); err != nil {
@ -87,7 +87,7 @@ func (c *Controller) LaunchJob(req models.JobRequest) (models.JobStats, error) {
return res, err return res, err
} }
//GetJob is implementation of same method in core interface. // GetJob is implementation of same method in core interface.
func (c *Controller) GetJob(jobID string) (models.JobStats, error) { func (c *Controller) GetJob(jobID string) (models.JobStats, error) {
if utils.IsEmptyStr(jobID) { if utils.IsEmptyStr(jobID) {
return models.JobStats{}, errors.New("empty job ID") return models.JobStats{}, errors.New("empty job ID")
@ -96,7 +96,7 @@ func (c *Controller) GetJob(jobID string) (models.JobStats, error) {
return c.backendPool.GetJobStats(jobID) return c.backendPool.GetJobStats(jobID)
} }
//StopJob is implementation of same method in core interface. // StopJob is implementation of same method in core interface.
func (c *Controller) StopJob(jobID string) error { func (c *Controller) StopJob(jobID string) error {
if utils.IsEmptyStr(jobID) { if utils.IsEmptyStr(jobID) {
return errors.New("empty job ID") return errors.New("empty job ID")
@ -105,7 +105,7 @@ func (c *Controller) StopJob(jobID string) error {
return c.backendPool.StopJob(jobID) return c.backendPool.StopJob(jobID)
} }
//CancelJob is implementation of same method in core interface. // CancelJob is implementation of same method in core interface.
func (c *Controller) CancelJob(jobID string) error { func (c *Controller) CancelJob(jobID string) error {
if utils.IsEmptyStr(jobID) { if utils.IsEmptyStr(jobID) {
return errors.New("empty job ID") return errors.New("empty job ID")
@ -114,7 +114,7 @@ func (c *Controller) CancelJob(jobID string) error {
return c.backendPool.CancelJob(jobID) return c.backendPool.CancelJob(jobID)
} }
//RetryJob is implementation of same method in core interface. // RetryJob is implementation of same method in core interface.
func (c *Controller) RetryJob(jobID string) error { func (c *Controller) RetryJob(jobID string) error {
if utils.IsEmptyStr(jobID) { if utils.IsEmptyStr(jobID) {
return errors.New("empty job ID") return errors.New("empty job ID")
@ -123,7 +123,7 @@ func (c *Controller) RetryJob(jobID string) error {
return c.backendPool.RetryJob(jobID) return c.backendPool.RetryJob(jobID)
} }
//GetJobLogData is used to return the log text data for the specified job if it exists // GetJobLogData is used to return the log text data for the specified job if it exists
func (c *Controller) GetJobLogData(jobID string) ([]byte, error) { func (c *Controller) GetJobLogData(jobID string) ([]byte, error) {
if utils.IsEmptyStr(jobID) { if utils.IsEmptyStr(jobID) {
return nil, errors.New("empty job ID") return nil, errors.New("empty job ID")
@ -142,7 +142,7 @@ func (c *Controller) GetJobLogData(jobID string) ([]byte, error) {
return logData, nil return logData, nil
} }
//CheckStatus is implementation of same method in core interface. // CheckStatus is implementation of same method in core interface.
func (c *Controller) CheckStatus() (models.JobPoolStats, error) { func (c *Controller) CheckStatus() (models.JobPoolStats, error) {
return c.backendPool.Stats() return c.backendPool.Stats()
} }
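Putting the controller methods together, a hedged sketch of launching a job over an existing backend pool. Only the field accesses shown above (req.Job.Name, req.Job.Parameters, req.Job.Metadata.IsUnique, req.Job.StatusHook) are taken from this diff, so treat the exact struct names and the omitted metadata fields as assumptions:

// launchSketch enqueues a job by name through the controller; values are illustrative.
func launchSketch(backendPool pool.Interface) (models.JobStats, error) {
	ctl := NewController(backendPool)
	req := models.JobRequest{
		Job: &models.JobData{
			Name:       "DEMO",                                    // must pass backendPool.IsKnownJob
			Parameters: map[string]interface{}{"param1": "value"}, // checked by ValidateJobParameters
			Metadata:   &models.JobMetadata{IsUnique: false},      // job kind selection elided in this sketch
			StatusHook: "https://hooks.local/job-status",          // optional; registered after enqueueing
		},
	}
	return ctl.LaunchJob(req)
}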

View File

@ -1,59 +1,59 @@
// Copyright 2018 The Harbor Authors. All rights reserved. // Copyright 2018 The Harbor Authors. All rights reserved.
//Package core provides the main job operation interface and components. // Package core provides the main job operation interface and components.
package core package core
import ( import (
"github.com/goharbor/harbor/src/jobservice/models" "github.com/goharbor/harbor/src/jobservice/models"
) )
//Interface defines the related main methods of job operation. // Interface defines the related main methods of job operation.
type Interface interface { type Interface interface {
//LaunchJob is used to handle the job submission request. // LaunchJob is used to handle the job submission request.
// //
//req JobRequest : Job request containing the required information for enqueuing the job. // req JobRequest : Job request containing the required information for enqueuing the job.
// //
//Returns: // Returns:
// JobStats: Job status info with ID and self link returned if job is successfully launched. // JobStats: Job status info with ID and self link returned if job is successfully launched.
// error : Error returned if failed to launch the specified job. // error : Error returned if failed to launch the specified job.
LaunchJob(req models.JobRequest) (models.JobStats, error) LaunchJob(req models.JobRequest) (models.JobStats, error)
//GetJob is used to handle the job stats query request. // GetJob is used to handle the job stats query request.
// //
//jobID string: ID of job. // jobID string: ID of job.
// //
//Returns: // Returns:
// JobStats: Job status info if job exists. // JobStats: Job status info if job exists.
// error : Error returned if failed to get the specified job. // error : Error returned if failed to get the specified job.
GetJob(jobID string) (models.JobStats, error) GetJob(jobID string) (models.JobStats, error)
//StopJob is used to handle the job stopping request. // StopJob is used to handle the job stopping request.
// //
//jobID string: ID of job. // jobID string: ID of job.
// //
//Return: // Return:
// error : Error returned if failed to stop the specified job. // error : Error returned if failed to stop the specified job.
StopJob(jobID string) error StopJob(jobID string) error
//RetryJob is used to handle the job retrying request. // RetryJob is used to handle the job retrying request.
// //
//jobID string : ID of job. // jobID string : ID of job.
// //
//Return: // Return:
// error : Error returned if failed to retry the specified job. // error : Error returned if failed to retry the specified job.
RetryJob(jobID string) error RetryJob(jobID string) error
//CancelJob cancels the enqueued job // CancelJob cancels the enqueued job
// //
//jobID string : ID of the enqueued job // jobID string : ID of the enqueued job
// //
//Returns: // Returns:
// error : error returned if any problems are met // error : error returned if any problems are met
CancelJob(jobID string) error CancelJob(jobID string) error
//CheckStatus is used to handle the job service healthy status checking request. // CheckStatus is used to handle the job service healthy status checking request.
CheckStatus() (models.JobPoolStats, error) CheckStatus() (models.JobPoolStats, error)
//GetJobLogData is used to return the log text data for the specified job if it exists // GetJobLogData is used to return the log text data for the specified job if it exists
GetJobLogData(jobID string) ([]byte, error) GetJobLogData(jobID string) ([]byte, error)
} }
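For handler or unit tests, the interface can be satisfied by a small stub; a minimal sketch with canned return values only and no real pool behind it:

// fakeController is a stub implementation of the core Interface for tests.
type fakeController struct{}

func (f *fakeController) LaunchJob(req models.JobRequest) (models.JobStats, error) {
	return models.JobStats{}, nil // pretend the job was accepted
}
func (f *fakeController) GetJob(jobID string) (models.JobStats, error) {
	return models.JobStats{}, nil
}
func (f *fakeController) StopJob(jobID string) error   { return nil }
func (f *fakeController) RetryJob(jobID string) error  { return nil }
func (f *fakeController) CancelJob(jobID string) error { return nil }
func (f *fakeController) CheckStatus() (models.JobPoolStats, error) {
	return models.JobPoolStats{}, nil
}
func (f *fakeController) GetJobLogData(jobID string) ([]byte, error) {
	return []byte("sample log"), nil
}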

View File

@ -5,20 +5,20 @@ import (
"sync" "sync"
) )
//Context keeps some sharable materials and the system control channels. // Context keeps some sharable materials and the system control channels.
//The system context.Context interface is also included. // The system context.Context interface is also included.
type Context struct { type Context struct {
//The system context with cancel capability. // The system context with cancel capability.
SystemContext context.Context SystemContext context.Context
//Coordination signal // Coordination signal
WG *sync.WaitGroup WG *sync.WaitGroup
//Report errors to bootstrap component // Report errors to bootstrap component
//Once error is reported by lower components, the whole system should exit // Once error is reported by lower components, the whole system should exit
ErrorChan chan error ErrorChan chan error
//The base job context reference // The base job context reference
//It will be the parent context of the job execution contexts // It will be the parent context of the job execution contexts
JobContext JobContext JobContext JobContext
} }

View File

@ -8,54 +8,54 @@ import (
"github.com/goharbor/harbor/src/jobservice/logger" "github.com/goharbor/harbor/src/jobservice/logger"
) )
//JobContext is combination of BaseContext and other job specified resources. // JobContext is combination of BaseContext and other job specified resources.
//JobContext will be the real execution context for one job. // JobContext will be the real execution context for one job.
type JobContext interface { type JobContext interface {
//Build the context based on the parent context // Build the context based on the parent context
// //
//dep JobData : Dependencies for building the context, in case the build // dep JobData : Dependencies for building the context, in case the build
//function needs some external info // function needs some external info
// //
//Returns: // Returns:
// new JobContext based on the parent one // new JobContext based on the parent one
// error if any problems are met // error if any problems are met
Build(dep JobData) (JobContext, error) Build(dep JobData) (JobContext, error)
//Get property from the context // Get property from the context
// //
//prop string : key of the context property // prop string : key of the context property
// //
//Returns: // Returns:
// The data of the specified context property if it exists // The data of the specified context property if it exists
// bool to indicate whether the property exists // bool to indicate whether the property exists
Get(prop string) (interface{}, bool) Get(prop string) (interface{}, bool)
//SystemContext returns the system context // SystemContext returns the system context
// //
//Returns: // Returns:
// context.Context // context.Context
SystemContext() context.Context SystemContext() context.Context
//Checkin is bridge func for reporting detailed status // Checkin is bridge func for reporting detailed status
// //
//status string : detailed status // status string : detailed status
// //
//Returns: // Returns:
// error if any problems are met // error if any problems are met
Checkin(status string) error Checkin(status string) error
//OPCommand returns the control operation command like stop/cancel if there is one // OPCommand returns the control operation command like stop/cancel if there is one
// //
//Returns: // Returns:
// op command if there is one // op command if there is one
// flag to indicate whether a command exists // flag to indicate whether a command exists
OPCommand() (string, bool) OPCommand() (string, bool)
//Return the logger // Return the logger
GetLogger() logger.Interface GetLogger() logger.Interface
} }
//JobData defines job context dependencies. // JobData defines job context dependencies.
type JobData struct { type JobData struct {
ID string ID string
Name string Name string
@ -63,5 +63,5 @@ type JobData struct {
ExtraData map[string]interface{} ExtraData map[string]interface{}
} }
//JobContextInitializer is a func to initialize the concrete job context // JobContextInitializer is a func to initialize the concrete job context
type JobContextInitializer func(ctx *Context) (JobContext, error) type JobContextInitializer func(ctx *Context) (JobContext, error)
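A hedged sketch of a JobContextInitializer wired to the concrete impl.Context that appears later in this diff; the client.Client construction and the import paths are assumptions:

// makeInitializer returns an initializer that builds the concrete job context
// on top of the system context and runs its one-off initialization.
func makeInitializer(adminClient client.Client) env.JobContextInitializer {
	return func(ctx *env.Context) (env.JobContext, error) {
		jobCtx := impl.NewContext(ctx.SystemContext, adminClient)
		if err := jobCtx.Init(); err != nil {
			return nil, err
		}
		return jobCtx, nil
	}
}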

View File

@ -1,6 +1,6 @@
// Copyright 2018 The Harbor Authors. All rights reserved. // Copyright 2018 The Harbor Authors. All rights reserved.
//Package errs defines some system errors with specified types. // Package errs defines some system errors with specified types.
package errs package errs
import ( import (
@ -8,46 +8,46 @@ import (
) )
const ( const (
//JobStoppedErrorCode is code for jobStoppedError // JobStoppedErrorCode is code for jobStoppedError
JobStoppedErrorCode = 10000 + iota JobStoppedErrorCode = 10000 + iota
//JobCancelledErrorCode is code for jobCancelledError // JobCancelledErrorCode is code for jobCancelledError
JobCancelledErrorCode JobCancelledErrorCode
//ReadRequestBodyErrorCode is code for the error of reading the http request body // ReadRequestBodyErrorCode is code for the error of reading the http request body
ReadRequestBodyErrorCode ReadRequestBodyErrorCode
//HandleJSONDataErrorCode is code for the error of handling json data // HandleJSONDataErrorCode is code for the error of handling json data
HandleJSONDataErrorCode HandleJSONDataErrorCode
//MissingBackendHandlerErrorCode is code for the error of missing backend controller // MissingBackendHandlerErrorCode is code for the error of missing backend controller
MissingBackendHandlerErrorCode MissingBackendHandlerErrorCode
//LaunchJobErrorCode is code for the error of launching job // LaunchJobErrorCode is code for the error of launching job
LaunchJobErrorCode LaunchJobErrorCode
//CheckStatsErrorCode is code for the error of checking stats of worker pool // CheckStatsErrorCode is code for the error of checking stats of worker pool
CheckStatsErrorCode CheckStatsErrorCode
//GetJobStatsErrorCode is code for the error of getting stats of enqueued job // GetJobStatsErrorCode is code for the error of getting stats of enqueued job
GetJobStatsErrorCode GetJobStatsErrorCode
//StopJobErrorCode is code for the error of stopping job // StopJobErrorCode is code for the error of stopping job
StopJobErrorCode StopJobErrorCode
//CancelJobErrorCode is code for the error of cancelling job // CancelJobErrorCode is code for the error of cancelling job
CancelJobErrorCode CancelJobErrorCode
//RetryJobErrorCode is code for the error of retrying job // RetryJobErrorCode is code for the error of retrying job
RetryJobErrorCode RetryJobErrorCode
//UnknownActionNameErrorCode is code for the case of unknown action name // UnknownActionNameErrorCode is code for the case of unknown action name
UnknownActionNameErrorCode UnknownActionNameErrorCode
//GetJobLogErrorCode is code for the error of getting job log // GetJobLogErrorCode is code for the error of getting job log
GetJobLogErrorCode GetJobLogErrorCode
//NoObjectFoundErrorCode is code for the error of no object found // NoObjectFoundErrorCode is code for the error of no object found
NoObjectFoundErrorCode NoObjectFoundErrorCode
//UnAuthorizedErrorCode is code for the error of unauthorized accessing // UnAuthorizedErrorCode is code for the error of unauthorized accessing
UnAuthorizedErrorCode UnAuthorizedErrorCode
) )
//baseError ... // baseError ...
type baseError struct { type baseError struct {
Code uint16 `json:"code"` Code uint16 `json:"code"`
Err string `json:"message"` Err string `json:"message"`
Description string `json:"details,omitempty"` Description string `json:"details,omitempty"`
} }
//Error is implementation of error interface. // Error is implementation of error interface.
func (be baseError) Error() string { func (be baseError) Error() string {
if data, err := json.Marshal(be); err == nil { if data, err := json.Marshal(be); err == nil {
return string(data) return string(data)
@ -56,7 +56,7 @@ func (be baseError) Error() string {
return "{}" return "{}"
} }
//New customized errors // New customized errors
func New(code uint16, err string, description string) error { func New(code uint16, err string, description string) error {
return baseError{ return baseError{
Code: code, Code: code,
@ -65,72 +65,72 @@ func New(code uint16, err string, description string) error {
} }
} }
//ReadRequestBodyError is error wrapper for the error of reading request body. // ReadRequestBodyError is error wrapper for the error of reading request body.
func ReadRequestBodyError(err error) error { func ReadRequestBodyError(err error) error {
return New(ReadRequestBodyErrorCode, "Read request body failed with error", err.Error()) return New(ReadRequestBodyErrorCode, "Read request body failed with error", err.Error())
} }
//HandleJSONDataError is error wrapper for the error of handling json data. // HandleJSONDataError is error wrapper for the error of handling json data.
func HandleJSONDataError(err error) error { func HandleJSONDataError(err error) error {
return New(HandleJSONDataErrorCode, "Handle json data failed with error", err.Error()) return New(HandleJSONDataErrorCode, "Handle json data failed with error", err.Error())
} }
//MissingBackendHandlerError is error wrapper for the error of missing backend controller. // MissingBackendHandlerError is error wrapper for the error of missing backend controller.
func MissingBackendHandlerError(err error) error { func MissingBackendHandlerError(err error) error {
return New(MissingBackendHandlerErrorCode, "Missing backend controller to handle the requests", err.Error()) return New(MissingBackendHandlerErrorCode, "Missing backend controller to handle the requests", err.Error())
} }
//LaunchJobError is error wrapper for the error of launching job failed. // LaunchJobError is error wrapper for the error of launching job failed.
func LaunchJobError(err error) error { func LaunchJobError(err error) error {
return New(LaunchJobErrorCode, "Launch job failed with error", err.Error()) return New(LaunchJobErrorCode, "Launch job failed with error", err.Error())
} }
//CheckStatsError is error wrapper for the error of checking stats failed // CheckStatsError is error wrapper for the error of checking stats failed
func CheckStatsError(err error) error { func CheckStatsError(err error) error {
return New(CheckStatsErrorCode, "Check stats of server failed with error", err.Error()) return New(CheckStatsErrorCode, "Check stats of server failed with error", err.Error())
} }
//GetJobStatsError is error wrapper for the error of getting job stats // GetJobStatsError is error wrapper for the error of getting job stats
func GetJobStatsError(err error) error { func GetJobStatsError(err error) error {
return New(GetJobStatsErrorCode, "Get job stats failed with error", err.Error()) return New(GetJobStatsErrorCode, "Get job stats failed with error", err.Error())
} }
//StopJobError is error for the case of stopping job failed // StopJobError is error for the case of stopping job failed
func StopJobError(err error) error { func StopJobError(err error) error {
return New(StopJobErrorCode, "Stop job failed with error", err.Error()) return New(StopJobErrorCode, "Stop job failed with error", err.Error())
} }
//CancelJobError is error for the case of cancelling job failed // CancelJobError is error for the case of cancelling job failed
func CancelJobError(err error) error { func CancelJobError(err error) error {
return New(CancelJobErrorCode, "Cancel job failed with error", err.Error()) return New(CancelJobErrorCode, "Cancel job failed with error", err.Error())
} }
//RetryJobError is error for the case of retrying job failed // RetryJobError is error for the case of retrying job failed
func RetryJobError(err error) error { func RetryJobError(err error) error {
return New(RetryJobErrorCode, "Retry job failed with error", err.Error()) return New(RetryJobErrorCode, "Retry job failed with error", err.Error())
} }
//UnknownActionNameError is error for the case of getting unknown job action // UnknownActionNameError is error for the case of getting unknown job action
func UnknownActionNameError(err error) error { func UnknownActionNameError(err error) error {
return New(UnknownActionNameErrorCode, "Unknown job action name", err.Error()) return New(UnknownActionNameErrorCode, "Unknown job action name", err.Error())
} }
//GetJobLogError is error for the case of getting job log failed // GetJobLogError is error for the case of getting job log failed
func GetJobLogError(err error) error { func GetJobLogError(err error) error {
return New(GetJobLogErrorCode, "Failed to get the job log", err.Error()) return New(GetJobLogErrorCode, "Failed to get the job log", err.Error())
} }
//UnauthorizedError is error for the case of unauthorized accessing // UnauthorizedError is error for the case of unauthorized accessing
func UnauthorizedError(err error) error { func UnauthorizedError(err error) error {
return New(UnAuthorizedErrorCode, "Unauthorized", err.Error()) return New(UnAuthorizedErrorCode, "Unauthorized", err.Error())
} }
//jobStoppedError is designed for the case of stopping job. // jobStoppedError is designed for the case of stopping job.
type jobStoppedError struct { type jobStoppedError struct {
baseError baseError
} }
//JobStoppedError is error wrapper for the case of stopping job. // JobStoppedError is error wrapper for the case of stopping job.
func JobStoppedError() error { func JobStoppedError() error {
return jobStoppedError{ return jobStoppedError{
baseError{ baseError{
@ -140,12 +140,12 @@ func JobStoppedError() error {
} }
} }
//jobCancelledError is designed for the case of cancelling job. // jobCancelledError is designed for the case of cancelling job.
type jobCancelledError struct { type jobCancelledError struct {
baseError baseError
} }
//JobCancelledError is error wrapper for the case of cancelling job. // JobCancelledError is error wrapper for the case of cancelling job.
func JobCancelledError() error { func JobCancelledError() error {
return jobCancelledError{ return jobCancelledError{
baseError{ baseError{
@ -155,12 +155,12 @@ func JobCancelledError() error {
} }
} }
//objectNotFound is designed for the case of no object found // objectNotFound is designed for the case of no object found
type objectNotFoundError struct { type objectNotFoundError struct {
baseError baseError
} }
//NoObjectFoundError is error wrapper for the case of no object found // NoObjectFoundError is error wrapper for the case of no object found
func NoObjectFoundError(object string) error { func NoObjectFoundError(object string) error {
return objectNotFoundError{ return objectNotFoundError{
baseError{ baseError{
@ -171,19 +171,19 @@ func NoObjectFoundError(object string) error {
} }
} }
//IsJobStoppedError returns true if the error is jobStoppedError // IsJobStoppedError returns true if the error is jobStoppedError
func IsJobStoppedError(err error) bool { func IsJobStoppedError(err error) bool {
_, ok := err.(jobStoppedError) _, ok := err.(jobStoppedError)
return ok return ok
} }
//IsJobCancelledError returns true if the error is jobCancelledError // IsJobCancelledError returns true if the error is jobCancelledError
func IsJobCancelledError(err error) bool { func IsJobCancelledError(err error) bool {
_, ok := err.(jobCancelledError) _, ok := err.(jobCancelledError)
return ok return ok
} }
//IsObjectNotFoundError returns true if the error is objectNotFoundError // IsObjectNotFoundError returns true if the error is objectNotFoundError
func IsObjectNotFoundError(err error) bool { func IsObjectNotFoundError(err error) bool {
_, ok := err.(objectNotFoundError) _, ok := err.(objectNotFoundError)
return ok return ok
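A short sketch of consuming these typed errors on the caller side; the branch handling is illustrative only and all calls come from the wrappers defined above:

// interpretJobError maps the typed job errors to caller-side behaviour.
func interpretJobError(err error) error {
	if err == nil {
		return nil
	}
	switch {
	case errs.IsJobStoppedError(err):
		return nil // stopped on purpose, not treated as a failure
	case errs.IsJobCancelledError(err):
		return nil // cancelled, treated as a normal termination
	case errs.IsObjectNotFoundError(err):
		return err // surface "not found" to the API layer as-is
	default:
		return errs.LaunchJobError(err) // wrap other failures with a typed code
	}
}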

View File

@ -25,28 +25,28 @@ const (
maxRetryTimes = 5 maxRetryTimes = 5
) )
//Context ... // Context ...
type Context struct { type Context struct {
//System context // System context
sysContext context.Context sysContext context.Context
//Logger for job // Logger for job
logger logger.Interface logger logger.Interface
//op command func // op command func
opCommandFunc job.CheckOPCmdFunc opCommandFunc job.CheckOPCmdFunc
//checkin func // checkin func
checkInFunc job.CheckInFunc checkInFunc job.CheckInFunc
//other required information // other required information
properties map[string]interface{} properties map[string]interface{}
//admin server client // admin server client
adminClient client.Client adminClient client.Client
} }
//NewContext ... // NewContext ...
func NewContext(sysCtx context.Context, adminClient client.Client) *Context { func NewContext(sysCtx context.Context, adminClient client.Client) *Context {
return &Context{ return &Context{
sysContext: sysCtx, sysContext: sysCtx,
@ -55,7 +55,7 @@ func NewContext(sysCtx context.Context, adminClient client.Client) *Context {
} }
} }
//Init ... // Init ...
func (c *Context) Init() error { func (c *Context) Init() error {
var ( var (
counter = 0 counter = 0
@ -83,8 +83,8 @@ func (c *Context) Init() error {
return dao.InitDatabase(db) return dao.InitDatabase(db)
} }
//Build implements the same method in env.JobContext interface // Build implements the same method in env.JobContext interface
//This func will build the job execution context before running // This func will build the job execution context before running
func (c *Context) Build(dep env.JobData) (env.JobContext, error) { func (c *Context) Build(dep env.JobData) (env.JobContext, error) {
jContext := &Context{ jContext := &Context{
sysContext: c.sysContext, sysContext: c.sysContext,
@ -92,14 +92,14 @@ func (c *Context) Build(dep env.JobData) (env.JobContext, error) {
properties: make(map[string]interface{}), properties: make(map[string]interface{}),
} }
//Copy properties // Copy properties
if len(c.properties) > 0 { if len(c.properties) > 0 {
for k, v := range c.properties { for k, v := range c.properties {
jContext.properties[k] = v jContext.properties[k] = v
} }
} }
//Refresh admin server properties // Refresh admin server properties
props, err := c.adminClient.GetCfgs() props, err := c.adminClient.GetCfgs()
if err != nil { if err != nil {
return nil, err return nil, err
@ -108,7 +108,7 @@ func (c *Context) Build(dep env.JobData) (env.JobContext, error) {
jContext.properties[k] = v jContext.properties[k] = v
} }
//Init logger here // Init logger here
logPath := fmt.Sprintf("%s/%s.log", config.GetLogBasePath(), dep.ID) logPath := fmt.Sprintf("%s/%s.log", config.GetLogBasePath(), dep.ID)
jContext.logger = jlogger.New(logPath, config.GetLogLevel()) jContext.logger = jlogger.New(logPath, config.GetLogLevel())
if jContext.logger == nil { if jContext.logger == nil {
@ -141,18 +141,18 @@ func (c *Context) Build(dep env.JobData) (env.JobContext, error) {
return jContext, nil return jContext, nil
} }
//Get implements the same method in env.JobContext interface // Get implements the same method in env.JobContext interface
func (c *Context) Get(prop string) (interface{}, bool) { func (c *Context) Get(prop string) (interface{}, bool) {
v, ok := c.properties[prop] v, ok := c.properties[prop]
return v, ok return v, ok
} }
//SystemContext implements the same method in env.JobContext interface // SystemContext implements the same method in env.JobContext interface
func (c *Context) SystemContext() context.Context { func (c *Context) SystemContext() context.Context {
return c.sysContext return c.sysContext
} }
//Checkin is bridge func for reporting detailed status // Checkin is bridge func for reporting detailed status
func (c *Context) Checkin(status string) error { func (c *Context) Checkin(status string) error {
if c.checkInFunc != nil { if c.checkInFunc != nil {
c.checkInFunc(status) c.checkInFunc(status)
@ -163,7 +163,7 @@ func (c *Context) Checkin(status string) error {
return nil return nil
} }
//OPCommand returns the control operation command like stop/cancel if there is one // OPCommand returns the control operation command like stop/cancel if there is one
func (c *Context) OPCommand() (string, bool) { func (c *Context) OPCommand() (string, bool) {
if c.opCommandFunc != nil { if c.opCommandFunc != nil {
return c.opCommandFunc() return c.opCommandFunc()
@ -172,7 +172,7 @@ func (c *Context) OPCommand() (string, bool) {
return "", false return "", false
} }
//GetLogger returns the logger // GetLogger returns the logger
func (c *Context) GetLogger() logger.Interface { func (c *Context) GetLogger() logger.Interface {
return c.logger return c.logger
} }

View File

@ -17,20 +17,20 @@ import (
"github.com/goharbor/harbor/src/jobservice/env" "github.com/goharbor/harbor/src/jobservice/env"
) )
//DemoJob is the job to demonstrate the job interface. // DemoJob is the job to demonstrate the job interface.
type DemoJob struct{} type DemoJob struct{}
//MaxFails is implementation of same method in Interface. // MaxFails is implementation of same method in Interface.
func (dj *DemoJob) MaxFails() uint { func (dj *DemoJob) MaxFails() uint {
return 3 return 3
} }
//ShouldRetry ... // ShouldRetry ...
func (dj *DemoJob) ShouldRetry() bool { func (dj *DemoJob) ShouldRetry() bool {
return true return true
} }
//Validate is implementation of same method in Interface. // Validate is implementation of same method in Interface.
func (dj *DemoJob) Validate(params map[string]interface{}) error { func (dj *DemoJob) Validate(params map[string]interface{}) error {
if params == nil || len(params) == 0 { if params == nil || len(params) == 0 {
return errors.New("parameters required for replication job") return errors.New("parameters required for replication job")
@ -47,7 +47,7 @@ func (dj *DemoJob) Validate(params map[string]interface{}) error {
return nil return nil
} }
//Run implements the running logic of the demo job. // Run implements the running logic of the demo job.
func (dj *DemoJob) Run(ctx env.JobContext, params map[string]interface{}) error { func (dj *DemoJob) Run(ctx env.JobContext, params map[string]interface{}) error {
logger := ctx.GetLogger() logger := ctx.GetLogger()
@ -69,9 +69,9 @@ func (dj *DemoJob) Run(ctx env.JobContext, params map[string]interface{}) error
/*if 1 != 0 { /*if 1 != 0 {
return errors.New("I suicide") return errors.New("I suicide")
}*/ }*/
//runtime error // runtime error
//var runtime_err error = nil // var runtime_err error = nil
//fmt.Println(runtime_err.Error()) // fmt.Println(runtime_err.Error())
logger.Info("check in 30%") logger.Info("check in 30%")
ctx.Checkin("30%") ctx.Checkin("30%")
@ -83,10 +83,10 @@ func (dj *DemoJob) Run(ctx env.JobContext, params map[string]interface{}) error
ctx.Checkin("100%") ctx.Checkin("100%")
time.Sleep(1 * time.Second) time.Sleep(1 * time.Second)
//HOLD ON FOR A WHILE // HOLD ON FOR A WHILE
logger.Error("Holding for 20 sec") logger.Error("Holding for 20 sec")
<-time.After(15 * time.Second) <-time.After(15 * time.Second)
//logger.Fatal("I'm back, check if I'm stopped/cancelled") // logger.Fatal("I'm back, check if I'm stopped/cancelled")
if cmd, ok := ctx.OPCommand(); ok { if cmd, ok := ctx.OPCommand(); ok {
logger.Infof("cmd=%s\n", cmd) logger.Infof("cmd=%s\n", cmd)

View File

@ -2,9 +2,9 @@
package impl package impl
//Define the register name constants of known jobs // Define the register name constants of known jobs
const ( const (
//KnownJobDemo is name of demo job // KnownJobDemo is name of demo job
KnownJobDemo = "DEMO" KnownJobDemo = "DEMO"
) )
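Tying the constant back to the controller shown earlier, a hedged sketch of requesting the demo job by its registered name; the struct shapes carry the same assumptions as the controller sketch above:

// launchDemo enqueues the demo job through the core interface.
func launchDemo(ctl core.Interface) (models.JobStats, error) {
	return ctl.LaunchJob(models.JobRequest{
		Job: &models.JobData{
			Name:       impl.KnownJobDemo,                          // resolved via the pool's IsKnownJob check
			Parameters: map[string]interface{}{"param1": "value1"}, // illustrative; DemoJob.Validate checks its own keys
		},
	})
}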

Some files were not shown because too many files have changed in this diff.