Merge pull request #8299 from heww/new-project-with-quota
feat: Quota support for new project API
Commit: 5d59ea266c
@@ -3629,6 +3629,14 @@ definitions:
       metadata:
         description: The metadata of the project.
         $ref: '#/definitions/ProjectMetadata'
+      count_limit:
+        type: integer
+        format: int64
+        description: The count quota of the project.
+      storage_limit:
+        type: integer
+        format: int64
+        description: The storage quota of the project.
   Project:
     type: object
     properties:
@@ -4346,6 +4354,9 @@ definitions:
       auth_mode:
         type: string
         description: 'The auth mode of current system, such as "db_auth", "ldap_auth"'
+      count_per_project:
+        type: string
+        description: The default count quota for the new created projects.
       email_from:
         type: string
         description: The sender name for Email notification.
@@ -4412,6 +4423,9 @@ definitions:
       self_registration:
         type: boolean
         description: 'Whether the Harbor instance supports self-registration. If it''s set to false, admin need to add user to the instance.'
+      storage_per_project:
+        type: string
+        description: The default storage quota for the new created projects.
       token_expiration:
         type: integer
         description: 'The expiration time of the token for internal Registry, in minutes.'
@@ -4437,6 +4451,9 @@ definitions:
       auth_mode:
         $ref: '#/definitions/StringConfigItem'
        description: 'The auth mode of current system, such as "db_auth", "ldap_auth"'
+      count_per_project:
+        $ref: '#/definitions/IntegerConfigItem'
+        description: The default count quota for the new created projects.
       email_from:
         $ref: '#/definitions/StringConfigItem'
         description: The sender name for Email notification.
@@ -4503,6 +4520,9 @@ definitions:
       self_registration:
         $ref: '#/definitions/BoolConfigItem'
         description: 'Whether the Harbor instance supports self-registration. If it''s set to false, admin need to add user to the instance.'
+      storage_per_project:
+        $ref: '#/definitions/IntegerConfigItem'
+        description: The default storage quota for the new created projects.
       token_expiration:
         $ref: '#/definitions/IntegerConfigItem'
         description: 'The expiration time of the token for internal Registry, in minutes.'
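With the swagger changes above, a project-creation request may now carry optional count_limit and storage_limit fields, and the system configuration exposes count_per_project / storage_per_project defaults. A small illustrative sketch of building such a request body in Go; the projectReq struct below only mirrors the swagger fields and is not Harbor's own model type:

package main

import (
	"encoding/json"
	"fmt"
)

// projectReq is a hypothetical stand-in that mirrors the new swagger fields.
type projectReq struct {
	ProjectName  string `json:"project_name"`
	CountLimit   *int64 `json:"count_limit,omitempty"`
	StorageLimit *int64 `json:"storage_limit,omitempty"`
}

func main() {
	count, storage := int64(100), int64(-1) // -1 is the "unlimited" sentinel
	body, _ := json.Marshal(projectReq{
		ProjectName:  "demo",
		CountLimit:   &count,
		StorageLimit: &storage,
	})
	fmt.Println(string(body))
	// {"project_name":"demo","count_limit":100,"storage_limit":-1}
}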
@@ -70,3 +70,26 @@ CREATE TABLE quota_usage (
   update_time timestamp default CURRENT_TIMESTAMP,
   UNIQUE(reference, reference_id)
 );
+
+INSERT INTO quota (reference, reference_id, hard, creation_time, update_time)
+SELECT
+    'project',
+    CAST(project_id AS VARCHAR),
+    '{"count": -1, "storage": -1}',
+    NOW(),
+    NOW()
+FROM
+    project
+WHERE
+    deleted = 'f';
+
+INSERT INTO quota_usage (id, reference, reference_id, used, creation_time, update_time)
+SELECT
+    id,
+    reference,
+    reference_id,
+    '{"count": 0, "storage": 0}',
+    creation_time,
+    update_time
+FROM
+    quota;
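The hard and used columns seeded by this migration are plain JSON documents. A hedged sketch of decoding one such value in Go (the helper below is only for illustration and is not part of the migration or of Harbor's code):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// The value inserted for every existing, non-deleted project above.
	hard := `{"count": -1, "storage": -1}`

	var limits map[string]int64
	if err := json.Unmarshal([]byte(hard), &limits); err != nil {
		panic(err)
	}
	// -1 is the "unlimited" sentinel used throughout this change.
	fmt.Println(limits["count"], limits["storage"]) // -1 -1
}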
@@ -47,6 +47,7 @@ const (
 	HTTPAuthGroup = "http_auth"
 	OIDCGroup = "oidc"
 	DatabaseGroup = "database"
+	QuotaGroup = "quota"
 	// Put all config items do not belong a existing group into basic
 	BasicGroup = "basic"
 	ClairGroup = "clair"
@@ -147,5 +148,8 @@ var (
 		{Name: common.WithNotary, Scope: SystemScope, Group: BasicGroup, EnvKey: "WITH_NOTARY", DefaultValue: "false", ItemType: &BoolType{}, Editable: true},
 		// the unit of expiration is minute, 43200 minutes = 30 days
 		{Name: common.RobotTokenDuration, Scope: UserScope, Group: BasicGroup, EnvKey: "ROBOT_TOKEN_DURATION", DefaultValue: "43200", ItemType: &IntType{}, Editable: true},
+
+		{Name: common.CountPerProject, Scope: UserScope, Group: QuotaGroup, EnvKey: "COUNT_PER_PROJECT", DefaultValue: "-1", ItemType: &QuotaType{}, Editable: true},
+		{Name: common.StoragePerProject, Scope: UserScope, Group: QuotaGroup, EnvKey: "STORAGE_PER_PROJECT", DefaultValue: "-1", ItemType: &QuotaType{}, Editable: true},
 	}
 )
@@ -18,9 +18,10 @@ package metadata
 import (
 	"encoding/json"
 	"fmt"
-	"github.com/goharbor/harbor/src/common"
 	"strconv"
 	"strings"
+
+	"github.com/goharbor/harbor/src/common"
 )
 
 // Type - Use this interface to define and encapsulate the behavior of validation and transformation
@@ -186,3 +187,21 @@ func (t *MapType) get(str string) (interface{}, error) {
 	err := json.Unmarshal([]byte(str), &result)
 	return result, err
 }
+
+// QuotaType ...
+type QuotaType struct {
+	Int64Type
+}
+
+func (t *QuotaType) validate(str string) error {
+	val, err := strconv.ParseInt(str, 10, 64)
+	if err != nil {
+		return err
+	}
+
+	if val <= 0 && val != -1 {
+		return fmt.Errorf("quota value should be -1 or great than zero")
+	}
+
+	return nil
+}
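The rule enforced by the new QuotaType is simply "-1 (unlimited) or a positive integer". A standalone restatement of the logic, since the real method is unexported; validateQuota here is an illustrative helper, not Harbor's API:

package main

import (
	"fmt"
	"strconv"
)

// validateQuota mirrors QuotaType.validate from the hunk above.
func validateQuota(str string) error {
	val, err := strconv.ParseInt(str, 10, 64)
	if err != nil {
		return err
	}
	if val <= 0 && val != -1 {
		return fmt.Errorf("quota value should be -1 or greater than zero")
	}
	return nil
}

func main() {
	for _, v := range []string{"-1", "10", "0", "-5", "abc"} {
		fmt.Println(v, validateQuota(v)) // only "-1" and "10" pass
	}
}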
@@ -142,4 +142,8 @@ const (
 	OIDCLoginPath = "/c/oidc/login"
 
 	ChartUploadCtxKey = contextKey("chart_upload_event")
+
+	// Quota setting items for project
+	CountPerProject   = "count_per_project"
+	StoragePerProject = "storage_per_project"
 )
@@ -1034,8 +1034,10 @@ func TestIsDupRecError(t *testing.T) {
 }
 
 func TestWithTransaction(t *testing.T) {
+	reference := "transaction"
+
 	quota := models.Quota{
-		Reference:   "project",
+		Reference:   reference,
 		ReferenceID: "1",
 		Hard:        "{}",
 	}
@@ -1061,7 +1063,7 @@ func TestWithTransaction(t *testing.T) {
 
 	if assert.Error(WithTransaction(failed)) {
 		var quota models.Quota
-		quota.Reference = "project"
+		quota.Reference = reference
 		quota.ReferenceID = "1"
 		err := GetOrmer().Read(&quota, "reference", "reference_id")
 		assert.Error(err)
@@ -1070,7 +1072,7 @@ func TestWithTransaction(t *testing.T) {
 
 	if assert.Nil(WithTransaction(success)) {
 		var quota models.Quota
-		quota.Reference = "project"
+		quota.Reference = reference
 		quota.ReferenceID = "1"
 		err := GetOrmer().Read(&quota, "reference", "reference_id")
 		assert.Nil(err)
@@ -23,7 +23,7 @@ import (
 )
 
 var (
-	quotaReference     = "project"
+	quotaReference     = "dao"
 	quotaUserReference = "user"
 	quotaHard          = models.QuotaHard{"storage": 1024}
 	quotaHardLarger    = models.QuotaHard{"storage": 2048}
@@ -45,6 +45,7 @@ func (suite *QuotaDaoSuite) equalHard(quota1 *models.Quota, quota2 *models.Quota
 
 func (suite *QuotaDaoSuite) TearDownTest() {
 	ClearTable("quota")
+	ClearTable("quota_usage")
 }
 
 func (suite *QuotaDaoSuite) TestAddQuota() {
@@ -98,10 +99,17 @@ func (suite *QuotaDaoSuite) TestUpdateQuota() {
 }
 
 func (suite *QuotaDaoSuite) TestListQuotas() {
-	AddQuota(models.Quota{Reference: quotaReference, ReferenceID: "1", Hard: quotaHard.String()})
-	AddQuota(models.Quota{Reference: quotaReference, ReferenceID: "2", Hard: quotaHard.String()})
-	AddQuota(models.Quota{Reference: quotaReference, ReferenceID: "3", Hard: quotaHard.String()})
-	AddQuota(models.Quota{Reference: quotaUserReference, ReferenceID: "1", Hard: quotaHardLarger.String()})
+	id1, _ := AddQuota(models.Quota{Reference: quotaReference, ReferenceID: "1", Hard: quotaHard.String()})
+	AddQuotaUsage(models.QuotaUsage{ID: id1, Reference: quotaReference, ReferenceID: "1", Used: "{}"})
+
+	id2, _ := AddQuota(models.Quota{Reference: quotaReference, ReferenceID: "2", Hard: quotaHard.String()})
+	AddQuotaUsage(models.QuotaUsage{ID: id2, Reference: quotaReference, ReferenceID: "2", Used: "{}"})
+
+	id3, _ := AddQuota(models.Quota{Reference: quotaUserReference, ReferenceID: "1", Hard: quotaHardLarger.String()})
+	AddQuotaUsage(models.QuotaUsage{ID: id3, Reference: quotaUserReference, ReferenceID: "1", Used: "{}"})
+
+	id4, _ := AddQuota(models.Quota{Reference: quotaReference, ReferenceID: "3", Hard: quotaHard.String()})
+	AddQuotaUsage(models.QuotaUsage{ID: id4, Reference: quotaReference, ReferenceID: "3", Used: "{}"})
 
 	// List all the quotas
 	quotas, err := ListQuotas()
@@ -84,6 +84,12 @@ type OIDCSetting struct {
 	Scope []string `json:"scope"`
 }
 
+// QuotaSetting wraps the settings for Quota
+type QuotaSetting struct {
+	CountPerProject   int64 `json:"count_per_project"`
+	StoragePerProject int64 `json:"storage_per_project"`
+}
+
 // ConfigEntry ...
 type ConfigEntry struct {
 	ID int64 `orm:"pk;auto;column(id)" json:"-"`
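The new QuotaSetting mirrors the count_per_project / storage_per_project keys added earlier in the swagger and config metadata. A small hedged sketch of decoding those keys; the JSON document below is made up for illustration:

package main

import (
	"encoding/json"
	"fmt"
)

// QuotaSetting as added in the hunk above.
type QuotaSetting struct {
	CountPerProject   int64 `json:"count_per_project"`
	StoragePerProject int64 `json:"storage_per_project"`
}

func main() {
	raw := `{"count_per_project": -1, "storage_per_project": 1073741824}`
	var s QuotaSetting
	if err := json.Unmarshal([]byte(raw), &s); err != nil {
		panic(err)
	}
	fmt.Println(s.CountPerProject, s.StoragePerProject) // -1 1073741824
}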
@@ -168,6 +168,9 @@ type ProjectRequest struct {
 	Public       *int              `json:"public"` // deprecated, reserved for project creation in replication
 	Metadata     map[string]string `json:"metadata"`
 	CVEWhitelist CVEWhitelist      `json:"cve_whitelist"`
+
+	CountLimit   *int64 `json:"count_limit,omitempty"`
+	StorageLimit *int64 `json:"storage_limit,omitempty"`
 }
 
 // ProjectQueryResult ...
src/common/quota/driver/driver.go (new file, 59 lines)

// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package driver

import (
	"sync"

	"github.com/goharbor/harbor/src/pkg/types"
)

var (
	driversMu sync.RWMutex
	drivers   = map[string]Driver{}
)

// RefObject type for quota ref object
type RefObject map[string]interface{}

// Driver the driver for quota
type Driver interface {
	// HardLimits returns default resource list
	HardLimits() types.ResourceList
	// Load returns quota ref object by key
	Load(key string) (RefObject, error)
	// Validate validate the hard limits
	Validate(hardLimits types.ResourceList) error
}

// Register register quota driver
func Register(name string, driver Driver) {
	driversMu.Lock()
	defer driversMu.Unlock()
	if driver == nil {
		panic("quota: Register driver is nil")
	}

	drivers[name] = driver
}

// Get returns quota driver by name
func Get(name string) (Driver, bool) {
	driversMu.Lock()
	defer driversMu.Unlock()

	driver, ok := drivers[name]
	return driver, ok
}
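Usage of this registry follows the usual driver pattern: an implementation calls Register (typically from an init function) and callers look it up with Get. A hedged sketch under that assumption; inMemDriver and the "example" name are invented for illustration and are not part of Harbor:

package quotaexample

import (
	"fmt"

	"github.com/goharbor/harbor/src/common/quota/driver"
	"github.com/goharbor/harbor/src/pkg/types"
)

// inMemDriver is an illustrative Driver implementation.
type inMemDriver struct{}

func (d *inMemDriver) HardLimits() types.ResourceList {
	// Default to unlimited for both resources.
	return types.ResourceList{types.ResourceCount: -1, types.ResourceStorage: -1}
}

func (d *inMemDriver) Load(key string) (driver.RefObject, error) {
	// Echo the key back as the referenced object.
	return driver.RefObject{"id": key}, nil
}

func (d *inMemDriver) Validate(hardLimits types.ResourceList) error { return nil }

func Example() {
	driver.Register("example", &inMemDriver{})

	if d, ok := driver.Get("example"); ok {
		fmt.Println(d.HardLimits())
	}
}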
src/common/quota/driver/mocks/driver.go (new file, 65 lines)

// Code generated by mockery v1.0.0. DO NOT EDIT.

package mocks

import driver "github.com/goharbor/harbor/src/common/quota/driver"
import mock "github.com/stretchr/testify/mock"
import types "github.com/goharbor/harbor/src/pkg/types"

// Driver is an autogenerated mock type for the Driver type
type Driver struct {
	mock.Mock
}

// HardLimits provides a mock function with given fields:
func (_m *Driver) HardLimits() types.ResourceList {
	ret := _m.Called()

	var r0 types.ResourceList
	if rf, ok := ret.Get(0).(func() types.ResourceList); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(types.ResourceList)
		}
	}

	return r0
}

// Load provides a mock function with given fields: key
func (_m *Driver) Load(key string) (driver.RefObject, error) {
	ret := _m.Called(key)

	var r0 driver.RefObject
	if rf, ok := ret.Get(0).(func(string) driver.RefObject); ok {
		r0 = rf(key)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(driver.RefObject)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(string) error); ok {
		r1 = rf(key)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// Validate provides a mock function with given fields: resources
func (_m *Driver) Validate(resources types.ResourceList) error {
	ret := _m.Called(resources)

	var r0 error
	if rf, ok := ret.Get(0).(func(types.ResourceList) error); ok {
		r0 = rf(resources)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}
src/common/quota/driver/project/driver.go (new file, 143 lines)

// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package project

import (
	"context"
	"fmt"
	"strconv"

	"github.com/goharbor/harbor/src/common"
	"github.com/goharbor/harbor/src/common/config"
	"github.com/goharbor/harbor/src/common/dao"
	"github.com/goharbor/harbor/src/common/models"
	dr "github.com/goharbor/harbor/src/common/quota/driver"
	"github.com/goharbor/harbor/src/pkg/types"
	"github.com/graph-gophers/dataloader"
)

func init() {
	dr.Register("project", newDriver())
}

func getProjectsBatchFn(ctx context.Context, keys dataloader.Keys) []*dataloader.Result {
	handleError := func(err error) []*dataloader.Result {
		var results []*dataloader.Result
		var result dataloader.Result
		result.Error = err
		results = append(results, &result)
		return results
	}

	var projectIDs []int64
	for _, key := range keys {
		id, err := strconv.ParseInt(key.String(), 10, 64)
		if err != nil {
			return handleError(err)
		}
		projectIDs = append(projectIDs, id)
	}

	projects, err := dao.GetProjects(&models.ProjectQueryParam{})
	if err != nil {
		return handleError(err)
	}

	var projectsMap = make(map[int64]*models.Project, len(projectIDs))
	for _, project := range projects {
		projectsMap[project.ProjectID] = project
	}

	var results []*dataloader.Result
	for _, projectID := range projectIDs {
		project, ok := projectsMap[projectID]
		if !ok {
			return handleError(fmt.Errorf("project not found, "+"project_id: %d", projectID))
		}

		result := dataloader.Result{
			Data:  project,
			Error: nil,
		}
		results = append(results, &result)
	}

	return results
}

type driver struct {
	cfg    *config.CfgManager
	loader *dataloader.Loader
}

func (d *driver) HardLimits() types.ResourceList {
	return types.ResourceList{
		types.ResourceCount:   d.cfg.Get(common.CountPerProject).GetInt64(),
		types.ResourceStorage: d.cfg.Get(common.StoragePerProject).GetInt64(),
	}
}

func (d *driver) Load(key string) (dr.RefObject, error) {
	thunk := d.loader.Load(context.TODO(), dataloader.StringKey(key))

	result, err := thunk()
	if err != nil {
		return nil, err
	}

	project, ok := result.(*models.Project)
	if !ok {
		return nil, fmt.Errorf("bad result for project: %s", key)
	}

	return dr.RefObject{
		"id":         project.ProjectID,
		"name":       project.Name,
		"owner_name": project.OwnerName,
	}, nil
}

func (d *driver) Validate(hardLimits types.ResourceList) error {
	resources := map[types.ResourceName]bool{
		types.ResourceCount:   true,
		types.ResourceStorage: true,
	}

	for resource, value := range hardLimits {
		if !resources[resource] {
			return fmt.Errorf("resource %s not support", resource)
		}

		if value <= 0 && value != types.UNLIMITED {
			return fmt.Errorf("invalid value for resource %s", resource)
		}
	}

	for resource := range resources {
		if _, found := hardLimits[resource]; !found {
			return fmt.Errorf("resource %s not found", resource)
		}
	}

	return nil
}

func newDriver() dr.Driver {
	cfg := config.NewDBCfgManager()

	loader := dataloader.NewBatchedLoader(getProjectsBatchFn)

	return &driver{cfg: cfg, loader: loader}
}
src/common/quota/driver/project/driver_test.go (new file, 77 lines)

// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package project

import (
	"os"
	"testing"

	"github.com/goharbor/harbor/src/common/dao"
	dr "github.com/goharbor/harbor/src/common/quota/driver"
	"github.com/goharbor/harbor/src/pkg/types"
	"github.com/stretchr/testify/suite"
)

type DriverSuite struct {
	suite.Suite
}

func (suite *DriverSuite) TestHardLimits() {
	driver := newDriver()

	suite.Equal(types.ResourceList{types.ResourceCount: -1, types.ResourceStorage: -1}, driver.HardLimits())
}

func (suite *DriverSuite) TestLoad() {
	driver := newDriver()

	if ref, err := driver.Load("1"); suite.Nil(err) {
		obj := dr.RefObject{
			"id":         int64(1),
			"name":       "library",
			"owner_name": "",
		}

		suite.Equal(obj, ref)
	}

	if ref, err := driver.Load("100000"); suite.Error(err) {
		suite.Empty(ref)
	}

	if ref, err := driver.Load("library"); suite.Error(err) {
		suite.Empty(ref)
	}
}

func (suite *DriverSuite) TestValidate() {
	driver := newDriver()

	suite.Nil(driver.Validate(types.ResourceList{types.ResourceCount: 1, types.ResourceStorage: 1024}))
	suite.Error(driver.Validate(types.ResourceList{}))
	suite.Error(driver.Validate(types.ResourceList{types.ResourceCount: 1}))
	suite.Error(driver.Validate(types.ResourceList{types.ResourceCount: 1, types.ResourceStorage: 0}))
	suite.Error(driver.Validate(types.ResourceList{types.ResourceCount: 1, types.ResourceName("foo"): 1}))
}

func TestMain(m *testing.M) {
	dao.PrepareTestForPostgresSQL()

	os.Exit(m.Run())
}

func TestRunDriverSuite(t *testing.T) {
	suite.Run(t, new(DriverSuite))
}
@@ -15,20 +15,24 @@
 package quota
 
 import (
+	"fmt"
 	"time"
 
 	"github.com/astaxie/beego/orm"
 	"github.com/goharbor/harbor/src/common/dao"
 	"github.com/goharbor/harbor/src/common/models"
+	"github.com/goharbor/harbor/src/common/quota/driver"
+	"github.com/goharbor/harbor/src/pkg/types"
 )
 
 // Manager manager for quota
 type Manager struct {
+	driver      driver.Driver
 	reference   string
 	referenceID string
 }
 
-func (m *Manager) addQuota(o orm.Ormer, hardLimits ResourceList, now time.Time) (int64, error) {
+func (m *Manager) addQuota(o orm.Ormer, hardLimits types.ResourceList, now time.Time) (int64, error) {
 	quota := &models.Quota{
 		Reference:   m.reference,
 		ReferenceID: m.referenceID,
|
|||||||
return o.Insert(quota)
|
return o.Insert(quota)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Manager) addUsage(o orm.Ormer, hardLimits, used ResourceList, now time.Time, ids ...int64) (int64, error) {
|
func (m *Manager) addUsage(o orm.Ormer, used types.ResourceList, now time.Time, ids ...int64) (int64, error) {
|
||||||
usage := &models.QuotaUsage{
|
usage := &models.QuotaUsage{
|
||||||
Reference: m.reference,
|
Reference: m.reference,
|
||||||
ReferenceID: m.referenceID,
|
ReferenceID: m.referenceID,
|
||||||
@ -56,9 +60,39 @@ func (m *Manager) addUsage(o orm.Ormer, hardLimits, used ResourceList, now time.
|
|||||||
return o.Insert(usage)
|
return o.Insert(usage)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (m *Manager) newQuota(o orm.Ormer, hardLimits types.ResourceList, usages ...types.ResourceList) (int64, error) {
|
||||||
|
now := time.Now()
|
||||||
|
|
||||||
|
id, err := m.addQuota(o, hardLimits, now)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var used types.ResourceList
|
||||||
|
if len(usages) > 0 {
|
||||||
|
used = usages[0]
|
||||||
|
} else {
|
||||||
|
used = types.Zero(hardLimits)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := m.addUsage(o, used, now, id); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return id, nil
|
||||||
|
}
|
||||||
|
|
||||||
func (m *Manager) getQuotaForUpdate(o orm.Ormer) (*models.Quota, error) {
|
func (m *Manager) getQuotaForUpdate(o orm.Ormer) (*models.Quota, error) {
|
||||||
quota := &models.Quota{Reference: m.reference, ReferenceID: m.referenceID}
|
quota := &models.Quota{Reference: m.reference, ReferenceID: m.referenceID}
|
||||||
if err := o.ReadForUpdate(quota, "reference", "reference_id"); err != nil {
|
if err := o.ReadForUpdate(quota, "reference", "reference_id"); err != nil {
|
||||||
|
if err == orm.ErrNoRows {
|
||||||
|
if _, err := m.newQuota(o, m.driver.HardLimits()); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return m.getQuotaForUpdate(o)
|
||||||
|
}
|
||||||
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -74,12 +108,14 @@ func (m *Manager) getUsageForUpdate(o orm.Ormer) (*models.QuotaUsage, error) {
|
|||||||
return usage, nil
|
return usage, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Manager) updateUsage(o orm.Ormer, resources ResourceList, calculate func(ResourceList, ResourceList) ResourceList) error {
|
func (m *Manager) updateUsage(o orm.Ormer, resources types.ResourceList,
|
||||||
|
calculate func(types.ResourceList, types.ResourceList) types.ResourceList) error {
|
||||||
|
|
||||||
quota, err := m.getQuotaForUpdate(o)
|
quota, err := m.getQuotaForUpdate(o)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
hardLimits, err := NewResourceList(quota.Hard)
|
hardLimits, err := types.NewResourceList(quota.Hard)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -88,7 +124,7 @@ func (m *Manager) updateUsage(o orm.Ormer, resources ResourceList, calculate fun
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
used, err := NewResourceList(usage.Used)
|
used, err := types.NewResourceList(usage.Used)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -106,56 +142,76 @@ func (m *Manager) updateUsage(o orm.Ormer, resources ResourceList, calculate fun
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewQuota create new quota for (reference, reference id)
|
// NewQuota create new quota for (reference, reference id)
|
||||||
func (m *Manager) NewQuota(hardLimits ResourceList, usages ...ResourceList) (int64, error) {
|
func (m *Manager) NewQuota(hardLimit types.ResourceList, usages ...types.ResourceList) (int64, error) {
|
||||||
var quotaID int64
|
var id int64
|
||||||
|
err := dao.WithTransaction(func(o orm.Ormer) (err error) {
|
||||||
err := dao.WithTransaction(func(o orm.Ormer) error {
|
id, err = m.newQuota(o, hardLimit, usages...)
|
||||||
now := time.Now()
|
|
||||||
|
|
||||||
var err error
|
|
||||||
quotaID, err = m.addQuota(o, hardLimits, now)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
|
||||||
|
|
||||||
var used ResourceList
|
|
||||||
if len(usages) > 0 {
|
|
||||||
used = usages[0]
|
|
||||||
} else {
|
|
||||||
used = ResourceList{}
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := m.addUsage(o, hardLimits, used, now, quotaID); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
})
|
})
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return quotaID, nil
|
return id, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteQuota delete the quota
|
||||||
|
func (m *Manager) DeleteQuota() error {
|
||||||
|
return dao.WithTransaction(func(o orm.Ormer) error {
|
||||||
|
quota := &models.Quota{Reference: m.reference, ReferenceID: m.referenceID}
|
||||||
|
if _, err := o.Delete(quota, "reference", "reference_id"); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
usage := &models.QuotaUsage{Reference: m.reference, ReferenceID: m.referenceID}
|
||||||
|
if _, err := o.Delete(usage, "reference", "reference_id"); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateQuota update the quota resource spec
|
||||||
|
func (m *Manager) UpdateQuota(hardLimits types.ResourceList) error {
|
||||||
|
if err := m.driver.Validate(hardLimits); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
sql := `UPDATE quota SET hard = ? WHERE reference = ? AND reference_id = ?`
|
||||||
|
_, err := dao.GetOrmer().Raw(sql, hardLimits.String(), m.reference, m.referenceID).Exec()
|
||||||
|
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// AddResources add resources to usage
|
// AddResources add resources to usage
|
||||||
func (m *Manager) AddResources(resources ResourceList) error {
|
func (m *Manager) AddResources(resources types.ResourceList) error {
|
||||||
return dao.WithTransaction(func(o orm.Ormer) error {
|
return dao.WithTransaction(func(o orm.Ormer) error {
|
||||||
return m.updateUsage(o, resources, Add)
|
return m.updateUsage(o, resources, types.Add)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// SubtractResources subtract resources from usage
|
// SubtractResources subtract resources from usage
|
||||||
func (m *Manager) SubtractResources(resources ResourceList) error {
|
func (m *Manager) SubtractResources(resources types.ResourceList) error {
|
||||||
return dao.WithTransaction(func(o orm.Ormer) error {
|
return dao.WithTransaction(func(o orm.Ormer) error {
|
||||||
return m.updateUsage(o, resources, Subtract)
|
return m.updateUsage(o, resources, types.Subtract)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewManager returns quota manager
|
// NewManager returns quota manager
|
||||||
func NewManager(reference string, referenceID string) (*Manager, error) {
|
func NewManager(reference string, referenceID string) (*Manager, error) {
|
||||||
|
d, ok := driver.Get(reference)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("quota not support for %s", reference)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := d.Load(referenceID); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
return &Manager{
|
return &Manager{
|
||||||
|
driver: d,
|
||||||
reference: reference,
|
reference: reference,
|
||||||
referenceID: referenceID,
|
referenceID: referenceID,
|
||||||
}, nil
|
}, nil
|
||||||
|
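Taken together, the Manager now covers the full quota lifecycle: create, account, release, and delete. A hedged sketch of typical use under the assumption that the "project" driver is registered; the reference ID "1" and the limit values are only examples:

package quotaexample

import (
	"github.com/goharbor/harbor/src/common/quota"
	"github.com/goharbor/harbor/src/pkg/types"
)

func lifecycle() error {
	// NewManager fails if no driver is registered for the reference
	// or the referenced object cannot be loaded.
	mgr, err := quota.NewManager("project", "1")
	if err != nil {
		return err
	}

	// Create the quota row plus its usage row in one transaction.
	if _, err := mgr.NewQuota(types.ResourceList{
		types.ResourceCount:   100,
		types.ResourceStorage: 1024,
	}); err != nil {
		return err
	}

	// Account for one artifact of 100 bytes; this returns an "unsafe"
	// error if a hard limit would be exceeded.
	if err := mgr.AddResources(types.ResourceList{
		types.ResourceCount:   1,
		types.ResourceStorage: 100,
	}); err != nil {
		return err
	}

	// Release it again, then drop the quota entirely.
	if err := mgr.SubtractResources(types.ResourceList{
		types.ResourceCount:   1,
		types.ResourceStorage: 100,
	}); err != nil {
		return err
	}
	return mgr.DeleteQuota()
}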
@@ -15,21 +15,47 @@
 package quota
 
 import (
+	"fmt"
 	"os"
 	"sync"
 	"testing"
 
 	"github.com/goharbor/harbor/src/common/dao"
+	"github.com/goharbor/harbor/src/common/quota/driver"
+	"github.com/goharbor/harbor/src/common/quota/driver/mocks"
+	"github.com/goharbor/harbor/src/pkg/types"
+	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/suite"
 )
 
 var (
-	hardLimits       = ResourceList{ResourceStorage: 1000}
-	referenceProject = "project"
+	hardLimits = types.ResourceList{types.ResourceCount: -1, types.ResourceStorage: 1000}
+	reference  = "mock"
 )
 
-func mustResourceList(s string) ResourceList {
-	resources, _ := NewResourceList(s)
+func init() {
+	mockDriver := &mocks.Driver{}
+
+	mockHardLimitsFn := func() types.ResourceList {
+		return types.ResourceList{
+			types.ResourceCount:   -1,
+			types.ResourceStorage: -1,
+		}
+	}
+
+	mockLoadFn := func(key string) driver.RefObject {
+		return driver.RefObject{"id": key}
+	}
+
+	mockDriver.On("HardLimits").Return(mockHardLimitsFn)
+	mockDriver.On("Load", mock.AnythingOfType("string")).Return(mockLoadFn, nil)
+	mockDriver.On("Validate", mock.AnythingOfType("types.ResourceList")).Return(nil)
+
+	driver.Register(reference, mockDriver)
+}
+
+func mustResourceList(s string) types.ResourceList {
+	resources, _ := types.NewResourceList(s)
 	return resources
 }

@@ -37,13 +63,20 @@ type ManagerSuite struct {
 	suite.Suite
 }
 
+func (suite *ManagerSuite) SetupTest() {
+	_, ok := driver.Get(reference)
+	if !ok {
+		suite.Fail("driver not found for %s", reference)
+	}
+}
+
 func (suite *ManagerSuite) quotaManager(referenceIDs ...string) *Manager {
 	referenceID := "1"
 	if len(referenceIDs) > 0 {
 		referenceID = referenceIDs[0]
 	}
 
-	mgr, _ := NewManager(referenceProject, referenceID)
+	mgr, _ := NewManager(reference, referenceID)
 	return mgr
 }

@@ -57,17 +90,53 @@ func (suite *ManagerSuite) TestNewQuota() {
 
 	if id, err := mgr.NewQuota(hardLimits); suite.Nil(err) {
 		quota, _ := dao.GetQuota(id)
-		suite.True(Equals(hardLimits, mustResourceList(quota.Hard)))
+		suite.Equal(hardLimits, mustResourceList(quota.Hard))
 	}
 
 	mgr = suite.quotaManager("2")
-	used := ResourceList{ResourceStorage: 100}
+	used := types.ResourceList{types.ResourceStorage: 100}
 	if id, err := mgr.NewQuota(hardLimits, used); suite.Nil(err) {
 		quota, _ := dao.GetQuota(id)
-		suite.True(Equals(hardLimits, mustResourceList(quota.Hard)))
+		suite.Equal(hardLimits, mustResourceList(quota.Hard))
 
 		usage, _ := dao.GetQuotaUsage(id)
-		suite.True(Equals(used, mustResourceList(usage.Used)))
+		suite.Equal(used, mustResourceList(usage.Used))
+	}
+}
+
+func (suite *ManagerSuite) TestDeleteQuota() {
+	mgr := suite.quotaManager()
+
+	id, err := mgr.NewQuota(hardLimits)
+	if suite.Nil(err) {
+		quota, _ := dao.GetQuota(id)
+		suite.Equal(hardLimits, mustResourceList(quota.Hard))
+	}
+
+	if err := mgr.DeleteQuota(); suite.Nil(err) {
+		quota, _ := dao.GetQuota(id)
+		suite.Nil(quota)
+	}
+}
+
+func (suite *ManagerSuite) TestUpdateQuota() {
+	mgr := suite.quotaManager()
+
+	id, _ := mgr.NewQuota(hardLimits)
+	largeHardLimits := types.ResourceList{types.ResourceCount: -1, types.ResourceStorage: 1000000}
+
+	if err := mgr.UpdateQuota(largeHardLimits); suite.Nil(err) {
+		quota, _ := dao.GetQuota(id)
+		suite.Equal(largeHardLimits, mustResourceList(quota.Hard))
+	}
+}
+
+func (suite *ManagerSuite) TestQuotaAutoCreation() {
+	for i := 0; i < 10; i++ {
+		mgr := suite.quotaManager(fmt.Sprintf("%d", i))
+		resource := types.ResourceList{types.ResourceCount: 0, types.ResourceStorage: 100}
+
+		suite.Nil(mgr.AddResources(resource))
 	}
 }

@@ -75,19 +144,19 @@ func (suite *ManagerSuite) TestAddResources() {
 	mgr := suite.quotaManager()
 	id, _ := mgr.NewQuota(hardLimits)
 
-	resource := ResourceList{ResourceStorage: 100}
+	resource := types.ResourceList{types.ResourceCount: 0, types.ResourceStorage: 100}
 
 	if suite.Nil(mgr.AddResources(resource)) {
 		usage, _ := dao.GetQuotaUsage(id)
-		suite.True(Equals(resource, mustResourceList(usage.Used)))
+		suite.Equal(resource, mustResourceList(usage.Used))
 	}
 
 	if suite.Nil(mgr.AddResources(resource)) {
 		usage, _ := dao.GetQuotaUsage(id)
-		suite.True(Equals(ResourceList{ResourceStorage: 200}, mustResourceList(usage.Used)))
+		suite.Equal(types.ResourceList{types.ResourceCount: 0, types.ResourceStorage: 200}, mustResourceList(usage.Used))
 	}
 
-	if err := mgr.AddResources(ResourceList{ResourceStorage: 10000}); suite.Error(err) {
+	if err := mgr.AddResources(types.ResourceList{types.ResourceStorage: 10000}); suite.Error(err) {
 		suite.True(IsUnsafeError(err))
 	}
 }

@@ -96,16 +165,16 @@ func (suite *ManagerSuite) TestSubtractResources() {
 	mgr := suite.quotaManager()
 	id, _ := mgr.NewQuota(hardLimits)
 
-	resource := ResourceList{ResourceStorage: 100}
+	resource := types.ResourceList{types.ResourceCount: 0, types.ResourceStorage: 100}
 
 	if suite.Nil(mgr.AddResources(resource)) {
 		usage, _ := dao.GetQuotaUsage(id)
-		suite.True(Equals(resource, mustResourceList(usage.Used)))
+		suite.Equal(resource, mustResourceList(usage.Used))
 	}
 
 	if suite.Nil(mgr.SubtractResources(resource)) {
 		usage, _ := dao.GetQuotaUsage(id)
-		suite.True(Equals(ResourceList{ResourceStorage: 0}, mustResourceList(usage.Used)))
+		suite.Equal(types.ResourceList{types.ResourceCount: 0, types.ResourceStorage: 0}, mustResourceList(usage.Used))
 	}
 }

@@ -113,8 +182,8 @@ func (suite *ManagerSuite) TestRaceAddResources() {
 	mgr := suite.quotaManager()
 	mgr.NewQuota(hardLimits)
 
-	resources := ResourceList{
-		ResourceStorage: 100,
+	resources := types.ResourceList{
+		types.ResourceStorage: 100,
 	}
 
 	var wg sync.WaitGroup

@@ -142,10 +211,10 @@ func (suite *ManagerSuite) TestRaceAddResources() {
 
 func (suite *ManagerSuite) TestRaceSubtractResources() {
 	mgr := suite.quotaManager()
-	mgr.NewQuota(hardLimits, hardLimits)
+	mgr.NewQuota(hardLimits, types.ResourceList{types.ResourceStorage: 1000})
 
-	resources := ResourceList{
-		ResourceStorage: 100,
+	resources := types.ResourceList{
+		types.ResourceStorage: 100,
 	}
 
 	var wg sync.WaitGroup

@@ -189,11 +258,11 @@ func BenchmarkAddResources(b *testing.B) {
 		dao.ClearTable("quota_usage")
 	}()
 
-	mgr, _ := NewManager(referenceProject, "1")
-	mgr.NewQuota(ResourceList{ResourceStorage: int64(b.N)})
+	mgr, _ := NewManager(reference, "1")
+	mgr.NewQuota(types.ResourceList{types.ResourceStorage: int64(b.N)})
 
-	resource := ResourceList{
-		ResourceStorage: 1,
+	resource := types.ResourceList{
+		types.ResourceStorage: 1,
 	}
 
 	b.ResetTimer()

@@ -209,11 +278,11 @@ func BenchmarkAddResourcesParallel(b *testing.B) {
 		dao.ClearTable("quota_usage")
 	}()
 
-	mgr, _ := NewManager(referenceProject, "1")
-	mgr.NewQuota(ResourceList{ResourceStorage: -1})
+	mgr, _ := NewManager(reference, "1")
+	mgr.NewQuota(types.ResourceList{})
 
-	resource := ResourceList{
-		ResourceStorage: 1,
+	resource := types.ResourceList{
+		types.ResourceStorage: 1,
 	}
 
 	b.ResetTimer()
src/common/quota/quota.go (new file, 35 lines)

// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package quota

import (
	"fmt"

	"github.com/goharbor/harbor/src/common/quota/driver"
	"github.com/goharbor/harbor/src/pkg/types"

	// project driver for quota
	_ "github.com/goharbor/harbor/src/common/quota/driver/project"
)

// Validate validate hard limits
func Validate(reference string, hardLimits types.ResourceList) error {
	d, ok := driver.Get(reference)
	if !ok {
		return fmt.Errorf("quota not support for %s", reference)
	}

	return d.Validate(hardLimits)
}
src/common/quota/quota_test.go (new file, 45 lines)

// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package quota

import (
	"testing"

	_ "github.com/goharbor/harbor/src/common/quota/driver/project"
	"github.com/goharbor/harbor/src/pkg/types"
)

func TestValidate(t *testing.T) {
	type args struct {
		reference  string
		hardLimits types.ResourceList
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
	}{
		{"valid", args{"project", types.ResourceList{types.ResourceCount: 1, types.ResourceStorage: 1}}, false},
		{"invalid", args{"project", types.ResourceList{types.ResourceCount: 1, types.ResourceStorage: 0}}, true},
		{"not support", args{"not support", types.ResourceList{types.ResourceCount: 1}}, true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if err := Validate(tt.args.reference, tt.args.hardLimits); (err != nil) != tt.wantErr {
				t.Errorf("Validate() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}
src/common/quota/types.go (new file, 32 lines)

// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package quota

import (
	"github.com/goharbor/harbor/src/pkg/types"
)

var (
	// ResourceCount alias types.ResourceCount
	ResourceCount = types.ResourceCount
	// ResourceStorage alias types.ResourceStorage
	ResourceStorage = types.ResourceStorage
)

// ResourceName alias types.ResourceName
type ResourceName = types.ResourceName

// ResourceList alias types.ResourceList
type ResourceList = types.ResourceList
@@ -16,11 +16,8 @@ package quota
 
 import (
 	"fmt"
-)
 
-const (
-	// UNLIMITED unlimited quota value
-	UNLIMITED = int64(-1)
+	"github.com/goharbor/harbor/src/pkg/types"
 )
 
 type unsafe struct {
@@ -41,14 +38,14 @@ func IsUnsafeError(err error) bool {
 	return ok
 }
 
-func isSafe(hardLimits ResourceList, used ResourceList) error {
+func isSafe(hardLimits types.ResourceList, used types.ResourceList) error {
 	for key, value := range used {
 		if value < 0 {
 			return newUnsafe(fmt.Sprintf("bad used value: %d", value))
 		}
 
 		if hard, found := hardLimits[key]; found {
-			if hard == UNLIMITED {
+			if hard == types.UNLIMITED {
 				continue
 			}
 
@@ -17,6 +17,8 @@ package quota
 import (
 	"errors"
 	"testing"
+
+	"github.com/goharbor/harbor/src/pkg/types"
 )
 
 func TestIsUnsafeError(t *testing.T) {
@@ -50,8 +52,8 @@ func TestIsUnsafeError(t *testing.T) {
 
 func Test_checkQuotas(t *testing.T) {
 	type args struct {
-		hardLimits ResourceList
-		used       ResourceList
+		hardLimits types.ResourceList
+		used       types.ResourceList
 	}
 	tests := []struct {
 		name string
@@ -60,27 +62,27 @@ func Test_checkQuotas(t *testing.T) {
 	}{
 		{
 			"unlimited",
-			args{hardLimits: ResourceList{ResourceStorage: UNLIMITED}, used: ResourceList{ResourceStorage: 1000}},
+			args{hardLimits: types.ResourceList{types.ResourceStorage: types.UNLIMITED}, used: types.ResourceList{types.ResourceStorage: 1000}},
 			false,
 		},
 		{
 			"ok",
-			args{hardLimits: ResourceList{ResourceStorage: 100}, used: ResourceList{ResourceStorage: 1}},
+			args{hardLimits: types.ResourceList{types.ResourceStorage: 100}, used: types.ResourceList{types.ResourceStorage: 1}},
 			false,
 		},
 		{
 			"bad used value",
-			args{hardLimits: ResourceList{ResourceStorage: 100}, used: ResourceList{ResourceStorage: -1}},
+			args{hardLimits: types.ResourceList{types.ResourceStorage: 100}, used: types.ResourceList{types.ResourceStorage: -1}},
 			true,
 		},
 		{
 			"over the hard limit",
-			args{hardLimits: ResourceList{ResourceStorage: 100}, used: ResourceList{ResourceStorage: 200}},
+			args{hardLimits: types.ResourceList{types.ResourceStorage: 100}, used: types.ResourceList{types.ResourceStorage: 200}},
 			true,
 		},
 		{
 			"hard limit not found",
-			args{hardLimits: ResourceList{ResourceStorage: 100}, used: ResourceList{ResourceCount: 1}},
+			args{hardLimits: types.ResourceList{types.ResourceStorage: 100}, used: types.ResourceList{types.ResourceCount: 1}},
 			true,
 		},
 	}
@@ -24,11 +24,13 @@ import (
 	"github.com/goharbor/harbor/src/common"
 	"github.com/goharbor/harbor/src/common/dao"
 	"github.com/goharbor/harbor/src/common/models"
+	"github.com/goharbor/harbor/src/common/quota"
 	"github.com/goharbor/harbor/src/common/rbac"
 	"github.com/goharbor/harbor/src/common/utils"
 	errutil "github.com/goharbor/harbor/src/common/utils/error"
 	"github.com/goharbor/harbor/src/common/utils/log"
 	"github.com/goharbor/harbor/src/core/config"
+	"github.com/goharbor/harbor/src/pkg/types"
 	"github.com/pkg/errors"
 )
 
@@ -127,6 +129,7 @@ func (p *ProjectAPI) Post() {
 		p.SendBadRequestError(err)
 		return
 	}
+
 	err = validateProjectReq(pro)
 	if err != nil {
 		log.Errorf("Invalid project request, error: %v", err)
@@ -134,6 +137,25 @@ func (p *ProjectAPI) Post() {
 		return
 	}
 
+	setting, err := config.QuotaSetting()
+	if err != nil {
+		log.Errorf("failed to get quota setting: %v", err)
+		p.SendInternalServerError(fmt.Errorf("failed to get quota setting: %v", err))
+		return
+	}
+
+	if !p.SecurityCtx.IsSysAdmin() {
+		pro.CountLimit = &setting.CountPerProject
+		pro.StorageLimit = &setting.StoragePerProject
+	}
+
+	hardLimits, err := projectQuotaHardLimits(pro, setting)
+	if err != nil {
+		log.Errorf("Invalid project request, error: %v", err)
+		p.SendBadRequestError(fmt.Errorf("invalid request: %v", err))
+		return
+	}
+
 	exist, err := p.ProjectMgr.Exists(pro.Name)
 	if err != nil {
 		p.ParseAndHandleError(fmt.Sprintf("failed to check the existence of project %s",
@@ -188,6 +210,16 @@ func (p *ProjectAPI) Post() {
 		return
 	}
 
+	quotaMgr, err := quota.NewManager("project", strconv.FormatInt(projectID, 10))
+	if err != nil {
+		p.SendInternalServerError(fmt.Errorf("failed to get quota manager: %v", err))
+		return
+	}
+	if _, err := quotaMgr.NewQuota(hardLimits); err != nil {
+		p.SendInternalServerError(fmt.Errorf("failed to create quota for project: %v", err))
+		return
+	}
+
 	go func() {
 		if err = dao.AddAccessLog(
 			models.AccessLog{
@@ -262,6 +294,16 @@ func (p *ProjectAPI) Delete() {
 		return
 	}
 
+	quotaMgr, err := quota.NewManager("project", strconv.FormatInt(p.project.ProjectID, 10))
+	if err != nil {
+		p.SendInternalServerError(fmt.Errorf("failed to get quota manager: %v", err))
+		return
+	}
+	if err := quotaMgr.DeleteQuota(); err != nil {
+		p.SendInternalServerError(fmt.Errorf("failed to delete quota for project: %v", err))
+		return
+	}
+
 	go func() {
 		if err := dao.AddAccessLog(models.AccessLog{
 			Username: p.SecurityCtx.GetUsername(),
@@ -555,3 +597,24 @@ func validateProjectReq(req *models.ProjectRequest) error {
 	req.Metadata = metas
 	return nil
 }
+
+func projectQuotaHardLimits(req *models.ProjectRequest, setting *models.QuotaSetting) (types.ResourceList, error) {
+	hardLimits := types.ResourceList{}
+	if req.CountLimit != nil {
+		hardLimits[types.ResourceCount] = *req.CountLimit
+	} else {
+		hardLimits[types.ResourceCount] = setting.CountPerProject
+	}
+
+	if req.StorageLimit != nil {
+		hardLimits[types.ResourceStorage] = *req.StorageLimit
+	} else {
+		hardLimits[types.ResourceStorage] = setting.StoragePerProject
+	}
+
+	if err := quota.Validate("project", hardLimits); err != nil {
+		return nil, err
+	}
+
+	return hardLimits, nil
+}
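projectQuotaHardLimits falls back to the system-wide defaults for any limit the request leaves unset, then validates the combined result. A small sketch of that resolution behaviour with simplified local stand-ins for the real structs (the names and values below are illustrative, not part of the commit):

```go
package main

import "fmt"

// Simplified stand-ins for models.QuotaSetting and models.ProjectRequest.
type quotaSetting struct {
	CountPerProject   int64
	StoragePerProject int64
}

type projectRequest struct {
	CountLimit   *int64
	StorageLimit *int64
}

func main() {
	setting := quotaSetting{CountPerProject: 50, StoragePerProject: 10 * 1024 * 1024 * 1024}

	storage := int64(1024 * 1024 * 1024)
	req := projectRequest{StorageLimit: &storage} // the request overrides only the storage limit

	count, storageLimit := setting.CountPerProject, setting.StoragePerProject
	if req.CountLimit != nil {
		count = *req.CountLimit
	}
	if req.StorageLimit != nil {
		storageLimit = *req.StorageLimit
	}

	// count falls back to the default (50); storage uses the requested 1 GiB.
	fmt.Println(count, storageLimit)
}
```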
@@ -90,6 +90,31 @@ func TestAddProject(t *testing.T) {
 		assert.Equal(int(400), result, "case 4 : response code = 400 : Project name is illegal in length ")
 	}
 
+	// case 5: response code = 201 : expect project creation with quota success.
+	fmt.Println("case 5 : response code = 201 : expect project creation with quota success ")
+
+	var countLimit, storageLimit int64
+	countLimit, storageLimit = 100, 10
+	result, err = apiTest.ProjectsPost(*admin, apilib.ProjectReq{ProjectName: "with_quota", CountLimit: &countLimit, StorageLimit: &storageLimit})
+	if err != nil {
+		t.Error("Error while creat project", err.Error())
+		t.Log(err)
+	} else {
+		assert.Equal(int(201), result, "case 5 : response code = 201 : expect project creation with quota success ")
+	}
+
+	// case 6: response code = 400 : bad quota value, create project fail
+	fmt.Println("case 6: response code = 400 : bad quota value, create project fail")
+
+	countLimit, storageLimit = 100, -2
+	result, err = apiTest.ProjectsPost(*admin, apilib.ProjectReq{ProjectName: "with_quota", CountLimit: &countLimit, StorageLimit: &storageLimit})
+	if err != nil {
+		t.Error("Error while creat project", err.Error())
+		t.Log(err)
+	} else {
+		assert.Equal(int(400), result, "case 6: response code = 400 : bad quota value, create project fail")
+	}
+
 	fmt.Printf("\n")
 
 }
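Case 5 above exercises the new quota fields end to end. For reference, the JSON body that such a request carries follows the count_limit / storage_limit tags on ProjectReq (shown near the end of this diff); the sketch below only illustrates the wire shape with the same values as the test:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local mirror of the apilib ProjectReq fields relevant to quota, for illustration.
type projectReq struct {
	ProjectName  string `json:"project_name,omitempty"`
	CountLimit   *int64 `json:"count_limit,omitempty"`
	StorageLimit *int64 `json:"storage_limit,omitempty"`
}

func main() {
	count, storage := int64(100), int64(10)
	body, _ := json.Marshal(projectReq{ProjectName: "with_quota", CountLimit: &count, StorageLimit: &storage})

	// {"project_name":"with_quota","count_limit":100,"storage_limit":10}
	fmt.Println(string(body))
}
```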
@@ -230,7 +255,7 @@ func TestDeleteProject(t *testing.T) {
 		t.Error("Error while delete project", err.Error())
 		t.Log(err)
 	} else {
-		assert.Equal(int(401), httpStatusCode, "Case 1: Project creation status should be 401")
+		assert.Equal(int(401), httpStatusCode, "Case 1: Project deletion status should be 401")
 	}
 
 	// --------------------------case 2: Response Code=200---------------------------------//
@@ -240,7 +265,7 @@ func TestDeleteProject(t *testing.T) {
 		t.Error("Error while delete project", err.Error())
 		t.Log(err)
 	} else {
-		assert.Equal(int(200), httpStatusCode, "Case 2: Project creation status should be 200")
+		assert.Equal(int(200), httpStatusCode, "Case 2: Project deletion status should be 200")
 	}
 
 	// --------------------------case 3: Response Code=404,Project does not exist---------------------------------//
@@ -251,7 +276,7 @@ func TestDeleteProject(t *testing.T) {
 		t.Error("Error while delete project", err.Error())
 		t.Log(err)
 	} else {
-		assert.Equal(int(404), httpStatusCode, "Case 3: Project creation status should be 404")
+		assert.Equal(int(404), httpStatusCode, "Case 3: Project deletion status should be 404")
 	}
 
 	// --------------------------case 4: Response Code=400,Invalid project id.---------------------------------//
@@ -262,7 +287,7 @@ func TestDeleteProject(t *testing.T) {
 		t.Error("Error while delete project", err.Error())
 		t.Log(err)
 	} else {
-		assert.Equal(int(400), httpStatusCode, "Case 4: Project creation status should be 400")
+		assert.Equal(int(400), httpStatusCode, "Case 4: Project deletion status should be 400")
 	}
 	fmt.Printf("\n")
 
@@ -510,3 +510,14 @@ func OIDCSetting() (*models.OIDCSetting, error) {
 		Scope: scope,
 	}, nil
 }
+
+// QuotaSetting returns the setting of quota.
+func QuotaSetting() (*models.QuotaSetting, error) {
+	if err := cfgMgr.Load(); err != nil {
+		return nil, err
+	}
+	return &models.QuotaSetting{
+		CountPerProject:   cfgMgr.Get(common.CountPerProject).GetInt64(),
+		StoragePerProject: cfgMgr.Get(common.StoragePerProject).GetInt64(),
+	}, nil
+}
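QuotaSetting surfaces the two new system-level configuration items (common.CountPerProject and common.StoragePerProject) that seed every non-admin project request. A hedged sketch of how a caller might interpret those defaults, assuming a value of -1 (the UNLIMITED sentinel added to the types package later in this diff) disables a limit; that convention is an assumption for illustration, not something this hunk states:

```go
package main

import "fmt"

// Illustrative stand-in for models.QuotaSetting.
type quotaSetting struct {
	CountPerProject   int64
	StoragePerProject int64
}

// unlimited mirrors the UNLIMITED = -1 constant introduced in src/pkg/types (assumed convention).
const unlimited = -1

func describe(s quotaSetting) string {
	storage := fmt.Sprintf("%d bytes", s.StoragePerProject)
	if s.StoragePerProject == unlimited {
		storage = "unlimited storage"
	}
	return fmt.Sprintf("default project quota: %d artifacts, %s", s.CountPerProject, storage)
}

func main() {
	fmt.Println(describe(quotaSetting{CountPerProject: 100, StoragePerProject: unlimited}))
}
```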
@@ -2,6 +2,8 @@ module github.com/goharbor/harbor/src
 
 go 1.12
 
+replace github.com/goharbor/harbor => ../
+
 require (
 	github.com/Knetic/govaluate v3.0.0+incompatible // indirect
 	github.com/Masterminds/semver v1.4.2
@@ -44,6 +46,7 @@ require (
 	github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf // indirect
 	github.com/gorilla/handlers v1.3.0
 	github.com/gorilla/mux v1.6.2
+	github.com/graph-gophers/dataloader v5.0.0+incompatible
 	github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
 	github.com/jinzhu/gorm v1.9.8 // indirect
 	github.com/justinas/alice v0.0.0-20171023064455-03f45bd4b7da
@@ -53,6 +56,7 @@ require (
 	github.com/miekg/pkcs11 v0.0.0-20170220202408-7283ca79f35e // indirect
 	github.com/opencontainers/go-digest v1.0.0-rc0
 	github.com/opencontainers/image-spec v1.0.1 // indirect
+	github.com/opentracing/opentracing-go v1.1.0 // indirect
 	github.com/pkg/errors v0.8.1
 	github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 // indirect
 	github.com/prometheus/client_golang v0.9.4 // indirect
@@ -145,6 +145,8 @@ github.com/gorilla/handlers v1.3.0/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/
 github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk=
 github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/graph-gophers/dataloader v5.0.0+incompatible h1:R+yjsbrNq1Mo3aPG+Z/EKYrXrXXUNJHOgbRt+U6jOug=
+github.com/graph-gophers/dataloader v5.0.0+incompatible/go.mod h1:jk4jk0c5ZISbKaMe8WsVopGB5/15GvGHMdMdPtwlRp4=
 github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
 github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
@@ -209,6 +211,8 @@ github.com/opencontainers/go-digest v1.0.0-rc0 h1:YHPGfp+qlmg7loi376Jk5jNEgjgUUI
 github.com/opencontainers/go-digest v1.0.0-rc0/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
 github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
+github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
 github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
 github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
 github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
@@ -12,13 +12,16 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package quota
+package types
 
 import (
 	"encoding/json"
 )
 
 const (
+	// UNLIMITED unlimited resource value
+	UNLIMITED = -1
+
 	// ResourceCount count, in number
 	ResourceCount ResourceName = "count"
 	// ResourceStorage storage size, in bytes
@@ -101,3 +104,12 @@ func Subtract(a ResourceList, b ResourceList) ResourceList {
 
 	return result
 }
+
+// Zero returns the result of a - a for each named resource
+func Zero(a ResourceList) ResourceList {
+	result := ResourceList{}
+	for key := range a {
+		result[key] = 0
+	}
+	return result
+}
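Zero complements the existing Subtract helper: it produces a ResourceList with the same keys but every value reset to zero. A short sketch of how the two behave together (the standalone program and values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/goharbor/harbor/src/pkg/types"
)

func main() {
	used := types.ResourceList{types.ResourceCount: 7, types.ResourceStorage: 2048}

	// Zero keeps the keys and clears the values, so Subtract(used, used) equals Zero(used).
	fmt.Println(types.Zero(used))           // map[count:0 storage:0]
	fmt.Println(types.Subtract(used, used)) // map[count:0 storage:0]

	// Keys missing from one side are treated as zero by Subtract.
	fmt.Println(types.Subtract(used, types.ResourceList{types.ResourceCount: 10})) // map[count:-3 storage:2048]
}
```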
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package quota
+package types
 
 import (
 	"testing"
@@ -67,6 +67,15 @@ func (suite *ResourcesSuite) TestSubtract() {
 	suite.Equal(ResourceList{ResourceStorage: 100, ResourceCount: -10}, Subtract(res1, res4))
 }
 
+func (suite *ResourcesSuite) TestZero() {
+	res1 := ResourceList{ResourceStorage: 100}
+	res2 := ResourceList{ResourceCount: 10, ResourceStorage: 100}
+
+	suite.Equal(ResourceList{}, Zero(ResourceList{}))
+	suite.Equal(ResourceList{ResourceStorage: 0}, Zero(res1))
+	suite.Equal(ResourceList{ResourceStorage: 0, ResourceCount: 0}, Zero(res2))
+}
+
 func TestRunResourcesSuite(t *testing.T) {
 	suite.Run(t, new(ResourcesSuite))
 }
@@ -27,4 +27,8 @@ type ProjectReq struct {
 	ProjectName string `json:"project_name,omitempty"`
 	// The metadata of the project.
 	Metadata map[string]string `json:"metadata,omitempty"`
+	// The count quota of the project.
+	CountLimit *int64 `json:"count_limit,omitempty"`
+	// The storage quota of the project
+	StorageLimit *int64 `json:"storage_limit,omitempty"`
 }
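Because both new fields are pointers with omitempty, a request that does not set them serialises without the quota keys at all, leaving the server-side defaults (or admin-supplied values) to apply. A small sketch of the two payload shapes (illustrative only; the -1 "unlimited" value is an assumption, not defined by this struct):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local mirror of the ProjectReq quota fields, for illustration.
type projectReq struct {
	ProjectName  string `json:"project_name,omitempty"`
	CountLimit   *int64 `json:"count_limit,omitempty"`
	StorageLimit *int64 `json:"storage_limit,omitempty"`
}

func main() {
	// No limits set: the quota keys are omitted entirely.
	noQuota, _ := json.Marshal(projectReq{ProjectName: "library2"})
	fmt.Println(string(noQuota)) // {"project_name":"library2"}

	// Explicit limits: both keys appear in the payload.
	count, storage := int64(10), int64(-1)
	withQuota, _ := json.Marshal(projectReq{ProjectName: "library2", CountLimit: &count, StorageLimit: &storage})
	fmt.Println(string(withQuota)) // {"project_name":"library2","count_limit":10,"storage_limit":-1}
}
```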
56
src/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
generated
vendored
56
src/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
generated
vendored
@ -533,6 +533,17 @@ var awsPartition = partition{
|
|||||||
"us-west-2": endpoint{},
|
"us-west-2": endpoint{},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
"backup": service{
|
||||||
|
|
||||||
|
Endpoints: endpoints{
|
||||||
|
"ap-southeast-2": endpoint{},
|
||||||
|
"eu-central-1": endpoint{},
|
||||||
|
"eu-west-1": endpoint{},
|
||||||
|
"us-east-1": endpoint{},
|
||||||
|
"us-east-2": endpoint{},
|
||||||
|
"us-west-2": endpoint{},
|
||||||
|
},
|
||||||
|
},
|
||||||
"batch": service{
|
"batch": service{
|
||||||
|
|
||||||
Endpoints: endpoints{
|
Endpoints: endpoints{
|
||||||
@ -959,7 +970,10 @@ var awsPartition = partition{
|
|||||||
"comprehendmedical": service{
|
"comprehendmedical": service{
|
||||||
|
|
||||||
Endpoints: endpoints{
|
Endpoints: endpoints{
|
||||||
|
"ap-southeast-2": endpoint{},
|
||||||
|
"ca-central-1": endpoint{},
|
||||||
"eu-west-1": endpoint{},
|
"eu-west-1": endpoint{},
|
||||||
|
"eu-west-2": endpoint{},
|
||||||
"us-east-1": endpoint{},
|
"us-east-1": endpoint{},
|
||||||
"us-east-2": endpoint{},
|
"us-east-2": endpoint{},
|
||||||
"us-west-2": endpoint{},
|
"us-west-2": endpoint{},
|
||||||
@ -1287,6 +1301,7 @@ var awsPartition = partition{
|
|||||||
"elasticbeanstalk": service{
|
"elasticbeanstalk": service{
|
||||||
|
|
||||||
Endpoints: endpoints{
|
Endpoints: endpoints{
|
||||||
|
"ap-east-1": endpoint{},
|
||||||
"ap-northeast-1": endpoint{},
|
"ap-northeast-1": endpoint{},
|
||||||
"ap-northeast-2": endpoint{},
|
"ap-northeast-2": endpoint{},
|
||||||
"ap-south-1": endpoint{},
|
"ap-south-1": endpoint{},
|
||||||
@ -1585,6 +1600,13 @@ var awsPartition = partition{
|
|||||||
"us-west-2": endpoint{},
|
"us-west-2": endpoint{},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
"groundstation": service{
|
||||||
|
|
||||||
|
Endpoints: endpoints{
|
||||||
|
"us-east-2": endpoint{},
|
||||||
|
"us-west-2": endpoint{},
|
||||||
|
},
|
||||||
|
},
|
||||||
"guardduty": service{
|
"guardduty": service{
|
||||||
IsRegionalized: boxedTrue,
|
IsRegionalized: boxedTrue,
|
||||||
Defaults: endpoint{
|
Defaults: endpoint{
|
||||||
@ -1652,6 +1674,7 @@ var awsPartition = partition{
|
|||||||
"ap-southeast-2": endpoint{},
|
"ap-southeast-2": endpoint{},
|
||||||
"eu-central-1": endpoint{},
|
"eu-central-1": endpoint{},
|
||||||
"eu-west-1": endpoint{},
|
"eu-west-1": endpoint{},
|
||||||
|
"eu-west-2": endpoint{},
|
||||||
"us-east-1": endpoint{},
|
"us-east-1": endpoint{},
|
||||||
"us-east-2": endpoint{},
|
"us-east-2": endpoint{},
|
||||||
"us-west-1": endpoint{},
|
"us-west-1": endpoint{},
|
||||||
@ -1689,13 +1712,30 @@ var awsPartition = partition{
|
|||||||
"us-west-2": endpoint{},
|
"us-west-2": endpoint{},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
"iotthingsgraph": service{
|
||||||
|
Defaults: endpoint{
|
||||||
|
CredentialScope: credentialScope{
|
||||||
|
Service: "iotthingsgraph",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Endpoints: endpoints{
|
||||||
|
"ap-northeast-1": endpoint{},
|
||||||
|
"ap-southeast-2": endpoint{},
|
||||||
|
"eu-west-1": endpoint{},
|
||||||
|
"us-east-1": endpoint{},
|
||||||
|
"us-west-2": endpoint{},
|
||||||
|
},
|
||||||
|
},
|
||||||
"kafka": service{
|
"kafka": service{
|
||||||
|
|
||||||
Endpoints: endpoints{
|
Endpoints: endpoints{
|
||||||
"ap-northeast-1": endpoint{},
|
"ap-northeast-1": endpoint{},
|
||||||
"ap-southeast-1": endpoint{},
|
"ap-southeast-1": endpoint{},
|
||||||
"ap-southeast-2": endpoint{},
|
"ap-southeast-2": endpoint{},
|
||||||
|
"eu-central-1": endpoint{},
|
||||||
"eu-west-1": endpoint{},
|
"eu-west-1": endpoint{},
|
||||||
|
"eu-west-2": endpoint{},
|
||||||
|
"eu-west-3": endpoint{},
|
||||||
"us-east-1": endpoint{},
|
"us-east-1": endpoint{},
|
||||||
"us-east-2": endpoint{},
|
"us-east-2": endpoint{},
|
||||||
"us-west-2": endpoint{},
|
"us-west-2": endpoint{},
|
||||||
@ -3106,7 +3146,7 @@ var awsPartition = partition{
|
|||||||
"support": service{
|
"support": service{
|
||||||
|
|
||||||
Endpoints: endpoints{
|
Endpoints: endpoints{
|
||||||
"us-east-1": endpoint{},
|
"aws-global": endpoint{},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
"swf": service{
|
"swf": service{
|
||||||
@ -3583,6 +3623,19 @@ var awscnPartition = partition{
|
|||||||
"cn-northwest-1": endpoint{},
|
"cn-northwest-1": endpoint{},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
"kms": service{
|
||||||
|
|
||||||
|
Endpoints: endpoints{
|
||||||
|
"ProdFips": endpoint{
|
||||||
|
Hostname: "kms-fips.cn-northwest-1.amazonaws.com.cn",
|
||||||
|
CredentialScope: credentialScope{
|
||||||
|
Region: "cn-northwest-1",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"cn-north-1": endpoint{},
|
||||||
|
"cn-northwest-1": endpoint{},
|
||||||
|
},
|
||||||
|
},
|
||||||
"lambda": service{
|
"lambda": service{
|
||||||
|
|
||||||
Endpoints: endpoints{
|
Endpoints: endpoints{
|
||||||
@ -3847,6 +3900,7 @@ var awsusgovPartition = partition{
|
|||||||
"athena": service{
|
"athena": service{
|
||||||
|
|
||||||
Endpoints: endpoints{
|
Endpoints: endpoints{
|
||||||
|
"us-gov-east-1": endpoint{},
|
||||||
"us-gov-west-1": endpoint{},
|
"us-gov-west-1": endpoint{},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
14
src/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
generated
vendored
14
src/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
generated
vendored
@ -1,19 +1,9 @@
|
|||||||
// +build !appengine,!plan9
|
|
||||||
|
|
||||||
package request
|
package request
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"net"
|
"strings"
|
||||||
"os"
|
|
||||||
"syscall"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func isErrConnectionReset(err error) bool {
|
func isErrConnectionReset(err error) bool {
|
||||||
if opErr, ok := err.(*net.OpError); ok {
|
return strings.Contains(err.Error(), "connection reset")
|
||||||
if sysErr, ok := opErr.Err.(*os.SyscallError); ok {
|
|
||||||
return sysErr.Err == syscall.ECONNRESET
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
}
|
||||||
|
11
src/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go
generated
vendored
11
src/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go
generated
vendored
@ -1,11 +0,0 @@
|
|||||||
// +build appengine plan9
|
|
||||||
|
|
||||||
package request
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
func isErrConnectionReset(err error) bool {
|
|
||||||
return strings.Contains(err.Error(), "connection reset")
|
|
||||||
}
|
|
2
src/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
generated
vendored
2
src/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
generated
vendored
@ -588,7 +588,7 @@ func shouldRetryCancel(err error) bool {
|
|||||||
return err.Temporary()
|
return err.Temporary()
|
||||||
case nil:
|
case nil:
|
||||||
// `awserr.Error.OrigErr()` can be nil, meaning there was an error but
|
// `awserr.Error.OrigErr()` can be nil, meaning there was an error but
|
||||||
// because we don't know the cause, it is marked as retriable. See
|
// because we don't know the cause, it is marked as retryable. See
|
||||||
// TestRequest4xxUnretryable for an example.
|
// TestRequest4xxUnretryable for an example.
|
||||||
return true
|
return true
|
||||||
default:
|
default:
|
||||||
|
20
src/vendor/github.com/aws/aws-sdk-go/aws/types.go
generated
vendored
20
src/vendor/github.com/aws/aws-sdk-go/aws/types.go
generated
vendored
@ -7,13 +7,18 @@ import (
|
|||||||
"github.com/aws/aws-sdk-go/internal/sdkio"
|
"github.com/aws/aws-sdk-go/internal/sdkio"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Should
|
// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Allows the
|
||||||
// only be used with an io.Reader that is also an io.Seeker. Doing so may
|
// SDK to accept an io.Reader that is not also an io.Seeker for unsigned
|
||||||
// cause request signature errors, or request body's not sent for GET, HEAD
|
// streaming payload API operations.
|
||||||
// and DELETE HTTP methods.
|
|
||||||
//
|
//
|
||||||
// Deprecated: Should only be used with io.ReadSeeker. If using for
|
// A ReadSeekCloser wrapping an nonseekable io.Reader used in an API
|
||||||
// S3 PutObject to stream content use s3manager.Uploader instead.
|
// operation's input will prevent that operation being retried in the case of
|
||||||
|
// network errors, and cause operation requests to fail if the operation
|
||||||
|
// requires payload signing.
|
||||||
|
//
|
||||||
|
// Note: If using With S3 PutObject to stream an object upload The SDK's S3
|
||||||
|
// Upload manager (s3manager.Uploader) provides support for streaming with the
|
||||||
|
// ability to retry network errors.
|
||||||
func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
|
func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
|
||||||
return ReaderSeekerCloser{r}
|
return ReaderSeekerCloser{r}
|
||||||
}
|
}
|
||||||
@ -43,7 +48,8 @@ func IsReaderSeekable(r io.Reader) bool {
|
|||||||
// Read reads from the reader up to size of p. The number of bytes read, and
|
// Read reads from the reader up to size of p. The number of bytes read, and
|
||||||
// error if it occurred will be returned.
|
// error if it occurred will be returned.
|
||||||
//
|
//
|
||||||
// If the reader is not an io.Reader zero bytes read, and nil error will be returned.
|
// If the reader is not an io.Reader zero bytes read, and nil error will be
|
||||||
|
// returned.
|
||||||
//
|
//
|
||||||
// Performs the same functionality as io.Reader Read
|
// Performs the same functionality as io.Reader Read
|
||||||
func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
|
func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
|
||||||
|
2
src/vendor/github.com/aws/aws-sdk-go/aws/version.go
generated
vendored
2
src/vendor/github.com/aws/aws-sdk-go/aws/version.go
generated
vendored
@ -5,4 +5,4 @@ package aws
|
|||||||
const SDKName = "aws-sdk-go"
|
const SDKName = "aws-sdk-go"
|
||||||
|
|
||||||
// SDKVersion is the version of this SDK
|
// SDKVersion is the version of this SDK
|
||||||
const SDKVersion = "1.19.36"
|
const SDKVersion = "1.19.47"
|
||||||
|
6
src/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
generated
vendored
6
src/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
generated
vendored
@ -304,7 +304,9 @@ loop:
|
|||||||
stmt := newCommentStatement(tok)
|
stmt := newCommentStatement(tok)
|
||||||
stack.Push(stmt)
|
stack.Push(stmt)
|
||||||
default:
|
default:
|
||||||
return nil, NewParseError(fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", k, tok))
|
return nil, NewParseError(
|
||||||
|
fmt.Sprintf("invalid state with ASTKind %v and TokenType %v",
|
||||||
|
k, tok.Type()))
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(tokens) > 0 {
|
if len(tokens) > 0 {
|
||||||
@ -314,7 +316,7 @@ loop:
|
|||||||
|
|
||||||
// this occurs when a statement has not been completed
|
// this occurs when a statement has not been completed
|
||||||
if stack.top > 1 {
|
if stack.top > 1 {
|
||||||
return nil, NewParseError(fmt.Sprintf("incomplete expression: %v", stack.container))
|
return nil, NewParseError(fmt.Sprintf("incomplete ini expression"))
|
||||||
}
|
}
|
||||||
|
|
||||||
// returns a sublist which excludes the start symbol
|
// returns a sublist which excludes the start symbol
|
||||||
|
35
src/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go
generated
vendored
35
src/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go
generated
vendored
@ -884,7 +884,7 @@ func (c *ECR) DescribeImagesWithContext(ctx aws.Context, input *DescribeImagesIn
|
|||||||
// // Example iterating over at most 3 pages of a DescribeImages operation.
|
// // Example iterating over at most 3 pages of a DescribeImages operation.
|
||||||
// pageNum := 0
|
// pageNum := 0
|
||||||
// err := client.DescribeImagesPages(params,
|
// err := client.DescribeImagesPages(params,
|
||||||
// func(page *DescribeImagesOutput, lastPage bool) bool {
|
// func(page *ecr.DescribeImagesOutput, lastPage bool) bool {
|
||||||
// pageNum++
|
// pageNum++
|
||||||
// fmt.Println(page)
|
// fmt.Println(page)
|
||||||
// return pageNum <= 3
|
// return pageNum <= 3
|
||||||
@ -1027,7 +1027,7 @@ func (c *ECR) DescribeRepositoriesWithContext(ctx aws.Context, input *DescribeRe
|
|||||||
// // Example iterating over at most 3 pages of a DescribeRepositories operation.
|
// // Example iterating over at most 3 pages of a DescribeRepositories operation.
|
||||||
// pageNum := 0
|
// pageNum := 0
|
||||||
// err := client.DescribeRepositoriesPages(params,
|
// err := client.DescribeRepositoriesPages(params,
|
||||||
// func(page *DescribeRepositoriesOutput, lastPage bool) bool {
|
// func(page *ecr.DescribeRepositoriesOutput, lastPage bool) bool {
|
||||||
// pageNum++
|
// pageNum++
|
||||||
// fmt.Println(page)
|
// fmt.Println(page)
|
||||||
// return pageNum <= 3
|
// return pageNum <= 3
|
||||||
@ -1728,7 +1728,7 @@ func (c *ECR) ListImagesWithContext(ctx aws.Context, input *ListImagesInput, opt
|
|||||||
// // Example iterating over at most 3 pages of a ListImages operation.
|
// // Example iterating over at most 3 pages of a ListImages operation.
|
||||||
// pageNum := 0
|
// pageNum := 0
|
||||||
// err := client.ListImagesPages(params,
|
// err := client.ListImagesPages(params,
|
||||||
// func(page *ListImagesOutput, lastPage bool) bool {
|
// func(page *ecr.ListImagesOutput, lastPage bool) bool {
|
||||||
// pageNum++
|
// pageNum++
|
||||||
// fmt.Println(page)
|
// fmt.Println(page)
|
||||||
// return pageNum <= 3
|
// return pageNum <= 3
|
||||||
@ -3936,22 +3936,21 @@ type GetLifecyclePolicyPreviewInput struct {
|
|||||||
ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list"`
|
ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list"`
|
||||||
|
|
||||||
// The maximum number of repository results returned by GetLifecyclePolicyPreviewRequest
|
// The maximum number of repository results returned by GetLifecyclePolicyPreviewRequest
|
||||||
// in
paginated output. When this parameter is used, GetLifecyclePolicyPreviewRequest
|
// in paginated output. When this parameter is used, GetLifecyclePolicyPreviewRequest
|
||||||
// only returns
maxResults results in a single page along with a nextToken
|
// only returns maxResults results in a single page along with a nextToken response
|
||||||
// response element. The remaining results of the initial request can be seen
|
// element. The remaining results of the initial request can be seen by sending
|
||||||
// by sending
another GetLifecyclePolicyPreviewRequest request with the returned
|
// another GetLifecyclePolicyPreviewRequest request with the returned nextToken
|
||||||
// nextToken
value. This value can be between 1 and 1000. If this
parameter
|
// value. This value can be between 1 and 1000. If this parameter is not used,
|
||||||
// is not used, then GetLifecyclePolicyPreviewRequest returns up to
100 results
|
// then GetLifecyclePolicyPreviewRequest returns up to 100 results and a nextToken
|
||||||
// and a nextToken value, if
applicable. This option cannot be used when you
|
// value, if applicable. This option cannot be used when you specify images
|
||||||
// specify images with imageIds.
|
// with imageIds.
|
||||||
MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"`
|
MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"`
|
||||||
|
|
||||||
// The nextToken value returned from a previous paginated
GetLifecyclePolicyPreviewRequest
|
// The nextToken value returned from a previous paginated GetLifecyclePolicyPreviewRequest
|
||||||
// request where maxResults was used and the
results exceeded the value of
|
// request where maxResults was used and the results exceeded the value of that
|
||||||
// that parameter. Pagination continues from the end of the
previous results
|
// parameter. Pagination continues from the end of the previous results that
|
||||||
// that returned the nextToken value. This value is
null when there are no
|
// returned the nextToken value. This value is null when there are no more results
|
||||||
// more results to return. This option cannot be used when you specify images
|
// to return. This option cannot be used when you specify images with imageIds.
|
||||||
// with imageIds.
|
|
||||||
NextToken *string `locationName:"nextToken" type:"string"`
|
NextToken *string `locationName:"nextToken" type:"string"`
|
||||||
|
|
||||||
// The AWS account ID associated with the registry that contains the repository.
|
// The AWS account ID associated with the registry that contains the repository.
|
||||||
@ -5041,7 +5040,7 @@ type PutLifecyclePolicyInput struct {
|
|||||||
LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string" required:"true"`
|
LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string" required:"true"`
|
||||||
|
|
||||||
// The AWS account ID associated with the registry that contains the repository.
|
// The AWS account ID associated with the registry that contains the repository.
|
||||||
// If you do
not specify a registry, the default registry is assumed.
|
// If you do not specify a registry, the default registry is assumed.
|
||||||
RegistryId *string `locationName:"registryId" type:"string"`
|
RegistryId *string `locationName:"registryId" type:"string"`
|
||||||
|
|
||||||
// The name of the repository to receive the policy.
|
// The name of the repository to receive the policy.
|
||||||
|
25
src/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
generated
vendored
25
src/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
generated
vendored
@ -106,7 +106,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
|
|||||||
// AWS API calls to access resources in the account that owns the role. You
|
// AWS API calls to access resources in the account that owns the role. You
|
||||||
// cannot use session policies to grant more permissions than those allowed
|
// cannot use session policies to grant more permissions than those allowed
|
||||||
// by the identity-based policy of the role that is being assumed. For more
|
// by the identity-based policy of the role that is being assumed. For more
|
||||||
// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM/latest/UserGuide/access_policies.html#policies_session)
|
// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
|
||||||
// in the IAM User Guide.
|
// in the IAM User Guide.
|
||||||
//
|
//
|
||||||
// To assume a role from a different account, your AWS account must be trusted
|
// To assume a role from a different account, your AWS account must be trusted
|
||||||
@ -286,7 +286,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
|
|||||||
// AWS API calls to access resources in the account that owns the role. You
|
// AWS API calls to access resources in the account that owns the role. You
|
||||||
// cannot use session policies to grant more permissions than those allowed
|
// cannot use session policies to grant more permissions than those allowed
|
||||||
// by the identity-based policy of the role that is being assumed. For more
|
// by the identity-based policy of the role that is being assumed. For more
|
||||||
// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM/latest/UserGuide/access_policies.html#policies_session)
|
// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
|
||||||
// in the IAM User Guide.
|
// in the IAM User Guide.
|
||||||
//
|
//
|
||||||
// Before your application can call AssumeRoleWithSAML, you must configure your
|
// Before your application can call AssumeRoleWithSAML, you must configure your
|
||||||
@ -484,7 +484,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
|
|||||||
// AWS API calls to access resources in the account that owns the role. You
|
// AWS API calls to access resources in the account that owns the role. You
|
||||||
// cannot use session policies to grant more permissions than those allowed
|
// cannot use session policies to grant more permissions than those allowed
|
||||||
// by the identity-based policy of the role that is being assumed. For more
|
// by the identity-based policy of the role that is being assumed. For more
|
||||||
// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM/latest/UserGuide/access_policies.html#policies_session)
|
// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
|
||||||
// in the IAM User Guide.
|
// in the IAM User Guide.
|
||||||
//
|
//
|
||||||
// Before your application can call AssumeRoleWithWebIdentity, you must have
|
// Before your application can call AssumeRoleWithWebIdentity, you must have
|
||||||
@ -506,7 +506,6 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
|
|||||||
// * Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html)
|
// * Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html)
|
||||||
// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
|
// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
|
||||||
//
|
//
|
||||||
//
|
|
||||||
// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html).
|
// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html).
|
||||||
// Walk through the process of authenticating through Login with Amazon,
|
// Walk through the process of authenticating through Login with Amazon,
|
||||||
// Facebook, or Google, getting temporary security credentials, and then
|
// Facebook, or Google, getting temporary security credentials, and then
|
||||||
@ -880,7 +879,7 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
|
|||||||
// you pass. This gives you a way to further restrict the permissions for a
|
// you pass. This gives you a way to further restrict the permissions for a
|
||||||
// federated user. You cannot use session policies to grant more permissions
|
// federated user. You cannot use session policies to grant more permissions
|
||||||
// than those that are defined in the permissions policy of the IAM user. For
|
// than those that are defined in the permissions policy of the IAM user. For
|
||||||
// more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM/latest/UserGuide/access_policies.html#policies_session)
|
// more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
|
||||||
// in the IAM User Guide. For information about using GetFederationToken to
|
// in the IAM User Guide. For information about using GetFederationToken to
|
||||||
// create temporary security credentials, see GetFederationToken—Federation
|
// create temporary security credentials, see GetFederationToken—Federation
|
||||||
// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken).
|
// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken).
|
||||||
@ -1107,7 +1106,7 @@ type AssumeRoleInput struct {
|
|||||||
// the role's temporary credentials in subsequent AWS API calls to access resources
|
// the role's temporary credentials in subsequent AWS API calls to access resources
|
||||||
// in the account that owns the role. You cannot use session policies to grant
|
// in the account that owns the role. You cannot use session policies to grant
|
||||||
// more permissions than those allowed by the identity-based policy of the role
|
// more permissions than those allowed by the identity-based policy of the role
|
||||||
// that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM/latest/UserGuide/access_policies.html#policies_session)
|
// that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
|
||||||
// in the IAM User Guide.
|
// in the IAM User Guide.
|
||||||
//
|
//
|
||||||
// The plain text that you use for both inline and managed session policies
|
// The plain text that you use for both inline and managed session policies
|
||||||
@ -1145,7 +1144,7 @@ type AssumeRoleInput struct {
|
|||||||
// in subsequent AWS API calls to access resources in the account that owns
|
// in subsequent AWS API calls to access resources in the account that owns
|
||||||
// the role. You cannot use session policies to grant more permissions than
|
// the role. You cannot use session policies to grant more permissions than
|
||||||
// those allowed by the identity-based policy of the role that is being assumed.
|
// those allowed by the identity-based policy of the role that is being assumed.
|
||||||
// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM/latest/UserGuide/access_policies.html#policies_session)
|
// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
|
||||||
// in the IAM User Guide.
|
// in the IAM User Guide.
|
||||||
PolicyArns []*PolicyDescriptorType `type:"list"`
|
PolicyArns []*PolicyDescriptorType `type:"list"`
|
||||||
|
|
||||||
@ -1385,7 +1384,7 @@ type AssumeRoleWithSAMLInput struct {
|
|||||||
// the role's temporary credentials in subsequent AWS API calls to access resources
|
// the role's temporary credentials in subsequent AWS API calls to access resources
|
||||||
// in the account that owns the role. You cannot use session policies to grant
|
// in the account that owns the role. You cannot use session policies to grant
|
||||||
// more permissions than those allowed by the identity-based policy of the role
|
// more permissions than those allowed by the identity-based policy of the role
|
||||||
// that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM/latest/UserGuide/access_policies.html#policies_session)
|
// that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
|
||||||
// in the IAM User Guide.
|
// in the IAM User Guide.
|
||||||
//
|
//
|
||||||
// The plain text that you use for both inline and managed session policies
|
// The plain text that you use for both inline and managed session policies
|
||||||
@ -1423,7 +1422,7 @@ type AssumeRoleWithSAMLInput struct {
|
|||||||
// in subsequent AWS API calls to access resources in the account that owns
|
// in subsequent AWS API calls to access resources in the account that owns
|
||||||
// the role. You cannot use session policies to grant more permissions than
|
// the role. You cannot use session policies to grant more permissions than
|
||||||
// those allowed by the identity-based policy of the role that is being assumed.
|
// those allowed by the identity-based policy of the role that is being assumed.
|
||||||
// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM/latest/UserGuide/access_policies.html#policies_session)
|
// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
|
||||||
// in the IAM User Guide.
|
// in the IAM User Guide.
|
||||||
PolicyArns []*PolicyDescriptorType `type:"list"`
|
PolicyArns []*PolicyDescriptorType `type:"list"`
|
||||||
|
|
||||||
@ -1680,7 +1679,7 @@ type AssumeRoleWithWebIdentityInput struct {
|
|||||||
// the role's temporary credentials in subsequent AWS API calls to access resources
|
// the role's temporary credentials in subsequent AWS API calls to access resources
|
||||||
// in the account that owns the role. You cannot use session policies to grant
|
// in the account that owns the role. You cannot use session policies to grant
|
||||||
// more permissions than those allowed by the identity-based policy of the role
|
// more permissions than those allowed by the identity-based policy of the role
|
||||||
// that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM/latest/UserGuide/access_policies.html#policies_session)
|
// that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
|
||||||
// in the IAM User Guide.
|
// in the IAM User Guide.
|
||||||
//
|
//
|
||||||
// The plain text that you use for both inline and managed session policies
|
// The plain text that you use for both inline and managed session policies
|
||||||
@ -1718,7 +1717,7 @@ type AssumeRoleWithWebIdentityInput struct {
|
|||||||
// in subsequent AWS API calls to access resources in the account that owns
|
// in subsequent AWS API calls to access resources in the account that owns
|
||||||
// the role. You cannot use session policies to grant more permissions than
|
// the role. You cannot use session policies to grant more permissions than
|
||||||
// those allowed by the identity-based policy of the role that is being assumed.
|
// those allowed by the identity-based policy of the role that is being assumed.
|
||||||
// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM/latest/UserGuide/access_policies.html#policies_session)
|
// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
|
||||||
// in the IAM User Guide.
|
// in the IAM User Guide.
|
||||||
PolicyArns []*PolicyDescriptorType `type:"list"`
|
PolicyArns []*PolicyDescriptorType `type:"list"`
|
||||||
|
|
||||||
@ -2259,7 +2258,7 @@ type GetFederationTokenInput struct {
|
|||||||
// you a way to further restrict the permissions for a federated user. You cannot
|
// you a way to further restrict the permissions for a federated user. You cannot
|
||||||
// use session policies to grant more permissions than those that are defined
|
// use session policies to grant more permissions than those that are defined
|
||||||
// in the permissions policy of the IAM user. For more information, see Session
|
// in the permissions policy of the IAM user. For more information, see Session
|
||||||
// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM/latest/UserGuide/access_policies.html#policies_session)
|
// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
|
||||||
// in the IAM User Guide.
|
// in the IAM User Guide.
|
||||||
//
|
//
|
||||||
// The plain text that you use for both inline and managed session policies
|
// The plain text that you use for both inline and managed session policies
|
||||||
@ -2299,7 +2298,7 @@ type GetFederationTokenInput struct {
|
|||||||
// you a way to further restrict the permissions for a federated user. You cannot
|
// you a way to further restrict the permissions for a federated user. You cannot
|
||||||
// use session policies to grant more permissions than those that are defined
|
// use session policies to grant more permissions than those that are defined
|
||||||
// in the permissions policy of the IAM user. For more information, see Session
|
// in the permissions policy of the IAM user. For more information, see Session
|
||||||
// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM/latest/UserGuide/access_policies.html#policies_session)
|
// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
|
||||||
// in the IAM User Guide.
|
// in the IAM User Guide.
|
||||||
//
|
//
|
||||||
// The characters in this parameter count towards the 2048 character session
|
// The characters in this parameter count towards the 2048 character session
|
||||||
|
18
src/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
generated
vendored
18
src/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
generated
vendored
@ -9,14 +9,6 @@
|
|||||||
// This guide provides descriptions of the STS API. For more detailed information
|
// This guide provides descriptions of the STS API. For more detailed information
|
||||||
// about using this service, go to Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
|
// about using this service, go to Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
|
||||||
//
|
//
|
||||||
// As an alternative to using the API, you can use one of the AWS SDKs, which
|
|
||||||
// consist of libraries and sample code for various programming languages and
|
|
||||||
// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient
|
|
||||||
// way to create programmatic access to STS. For example, the SDKs take care
|
|
||||||
// of cryptographically signing requests, managing errors, and retrying requests
|
|
||||||
// automatically. For information about the AWS SDKs, including how to download
|
|
||||||
// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/).
|
|
||||||
//
|
|
||||||
// For information about setting up signatures and authorization through the
|
// For information about setting up signatures and authorization through the
|
||||||
// API, go to Signing AWS API Requests (https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
|
// API, go to Signing AWS API Requests (https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
|
||||||
// in the AWS General Reference. For general information about the Query API,
|
// in the AWS General Reference. For general information about the Query API,
|
||||||
@ -53,11 +45,11 @@
|
|||||||
// in the IAM User Guide.
|
// in the IAM User Guide.
|
||||||
//
|
//
|
||||||
// After you activate a Region for use with AWS STS, you can direct AWS STS
|
// After you activate a Region for use with AWS STS, you can direct AWS STS
|
||||||
// API calls to that Region. AWS STS recommends that you use both the setRegion
|
// API calls to that Region. AWS STS recommends that you provide both the Region
|
||||||
// and setEndpoint methods to make calls to a Regional endpoint. You can use
|
// and endpoint when you make calls to a Regional endpoint. You can provide
|
||||||
// the setRegion method alone for manually enabled Regions, such as Asia Pacific
|
// the Region alone for manually enabled Regions, such as Asia Pacific (Hong
|
||||||
// (Hong Kong). In this case, the calls are directed to the STS Regional endpoint.
|
// Kong). In this case, the calls are directed to the STS Regional endpoint.
|
||||||
// However, if you use the setRegion method alone for Regions enabled by default,
|
// However, if you provide the Region alone for Regions enabled by default,
|
||||||
// the calls are directed to the global endpoint of https://sts.amazonaws.com.
|
// the calls are directed to the global endpoint of https://sts.amazonaws.com.
|
||||||
//
|
//
|
||||||
// To view the list of AWS STS endpoints and whether they are active by default,
|
// To view the list of AWS STS endpoints and whether they are active by default,
|
||||||
|
1
src/vendor/github.com/graph-gophers/dataloader/.gitignore
generated
vendored
Normal file
1
src/vendor/github.com/graph-gophers/dataloader/.gitignore
generated
vendored
Normal file
@ -0,0 +1 @@
|
|||||||
|
vendor/
|
15
src/vendor/github.com/graph-gophers/dataloader/.travis.yml
generated
vendored
Normal file
15
src/vendor/github.com/graph-gophers/dataloader/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
language: go
|
||||||
|
|
||||||
|
go:
|
||||||
|
- 1.8
|
||||||
|
- 1.x
|
||||||
|
|
||||||
|
install:
|
||||||
|
- go get -u github.com/golang/dep/...
|
||||||
|
- dep ensure
|
||||||
|
|
||||||
|
script:
|
||||||
|
- go test -v -race -coverprofile=coverage.txt -covermode=atomic
|
||||||
|
|
||||||
|
after_success:
|
||||||
|
- bash <(curl -s https://codecov.io/bash)
|
33
src/vendor/github.com/graph-gophers/dataloader/Gopkg.lock
generated
vendored
Normal file
33
src/vendor/github.com/graph-gophers/dataloader/Gopkg.lock
generated
vendored
Normal file
@ -0,0 +1,33 @@
|
|||||||
|
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||||
|
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/hashicorp/golang-lru"
|
||||||
|
packages = [".","simplelru"]
|
||||||
|
revision = "0a025b7e63adc15a622f29b0b2c4c3848243bbf6"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/opentracing/opentracing-go"
|
||||||
|
packages = [".","log"]
|
||||||
|
revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38"
|
||||||
|
version = "v1.0.2"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/patrickmn/go-cache"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "a3647f8e31d79543b2d0f0ae2fe5c379d72cedc0"
|
||||||
|
version = "v2.1.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "golang.org/x/net"
|
||||||
|
packages = ["context"]
|
||||||
|
revision = "a8b9294777976932365dabb6640cf1468d95c70f"
|
||||||
|
|
||||||
|
[solve-meta]
|
||||||
|
analyzer-name = "dep"
|
||||||
|
analyzer-version = 1
|
||||||
|
inputs-digest = "a0b8606d9f2ed9df7e69cae570c65c7d7b090bb7a08f58d3535b584693d44da9"
|
||||||
|
solver-name = "gps-cdcl"
|
||||||
|
solver-version = 1
|
34
src/vendor/github.com/graph-gophers/dataloader/Gopkg.toml
generated
vendored
Normal file
34
src/vendor/github.com/graph-gophers/dataloader/Gopkg.toml
generated
vendored
Normal file
@ -0,0 +1,34 @@
|
|||||||
|
|
||||||
|
# Gopkg.toml example
|
||||||
|
#
|
||||||
|
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
|
||||||
|
# for detailed Gopkg.toml documentation.
|
||||||
|
#
|
||||||
|
# required = ["github.com/user/thing/cmd/thing"]
|
||||||
|
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
|
||||||
|
#
|
||||||
|
# [[constraint]]
|
||||||
|
# name = "github.com/user/project"
|
||||||
|
# version = "1.0.0"
|
||||||
|
#
|
||||||
|
# [[constraint]]
|
||||||
|
# name = "github.com/user/project2"
|
||||||
|
# branch = "dev"
|
||||||
|
# source = "github.com/myfork/project2"
|
||||||
|
#
|
||||||
|
# [[override]]
|
||||||
|
# name = "github.com/x/y"
|
||||||
|
# version = "2.4.0"
|
||||||
|
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/hashicorp/golang-lru"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/opentracing/opentracing-go"
|
||||||
|
version = "1.0.2"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/patrickmn/go-cache"
|
||||||
|
version = "2.1.0"
|
21
src/vendor/github.com/graph-gophers/dataloader/LICENSE
generated
vendored
Normal file
21
src/vendor/github.com/graph-gophers/dataloader/LICENSE
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
MIT License
|
||||||
|
|
||||||
|
Copyright (c) 2017 Nick Randall
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
88
src/vendor/github.com/graph-gophers/dataloader/MIGRATE.md
generated
vendored
Normal file
@ -0,0 +1,88 @@
|
|||||||
|
## Upgrade from v1 to v2
|
||||||
|
The only difference between v1 and v2 is that we added use of [context](https://golang.org/pkg/context).
|
||||||
|
|
||||||
|
```diff
|
||||||
|
- loader.Load(key string) Thunk
|
||||||
|
+ loader.Load(ctx context.Context, key string) Thunk
|
||||||
|
- loader.LoadMany(keys []string) ThunkMany
|
||||||
|
+ loader.LoadMany(ctx context.Context, keys []string) ThunkMany
|
||||||
|
```
|
||||||
|
|
||||||
|
```diff
|
||||||
|
- type BatchFunc func([]string) []*Result
|
||||||
|
+ type BatchFunc func(context.Context, []string) []*Result
|
||||||
|
```
|
||||||
|
|
||||||
|
## Upgrade from v2 to v3
|
||||||
|
```diff
|
||||||
|
// dataloader.Interface has added context.Context to its methods
|
||||||
|
- loader.Prime(key string, value interface{}) Interface
|
||||||
|
+ loader.Prime(ctx context.Context, key string, value interface{}) Interface
|
||||||
|
- loader.Clear(key string) Interface
|
||||||
|
+ loader.Clear(ctx context.Context, key string) Interface
|
||||||
|
```
|
||||||
|
|
||||||
|
```diff
|
||||||
|
// cache interface has added context.Context to its methods
|
||||||
|
type Cache interface {
|
||||||
|
- Get(string) (Thunk, bool)
|
||||||
|
+ Get(context.Context, string) (Thunk, bool)
|
||||||
|
- Set(string, Thunk)
|
||||||
|
+ Set(context.Context, string, Thunk)
|
||||||
|
- Delete(string) bool
|
||||||
|
+ Delete(context.Context, string) bool
|
||||||
|
Clear()
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Upgrade from v3 to v4
|
||||||
|
```diff
|
||||||
|
// dataloader.Interface now allows interface{} as the key rather than string
|
||||||
|
- loader.Load(context.Context, key string) Thunk
|
||||||
|
+ loader.Load(ctx context.Context, key interface{}) Thunk
|
||||||
|
- loader.LoadMany(context.Context, key []string) ThunkMany
|
||||||
|
+ loader.LoadMany(ctx context.Context, keys []interface{}) ThunkMany
|
||||||
|
- loader.Prime(context.Context, key string, value interface{}) Interface
|
||||||
|
+ loader.Prime(ctx context.Context, key interface{}, value interface{}) Interface
|
||||||
|
- loader.Clear(context.Context, key string) Interface
|
||||||
|
+ loader.Clear(ctx context.Context, key interface{}) Interface
|
||||||
|
```
|
||||||
|
|
||||||
|
```diff
|
||||||
|
// cache interface now allows interface{} as key instead of string
|
||||||
|
type Cache interface {
|
||||||
|
- Get(context.Context, string) (Thunk, bool)
|
||||||
|
+ Get(context.Context, interface{}) (Thunk, bool)
|
||||||
|
- Set(context.Context, string, Thunk)
|
||||||
|
+ Set(context.Context, interface{}, Thunk)
|
||||||
|
- Delete(context.Context, string) bool
|
||||||
|
+ Delete(context.Context, interface{}) bool
|
||||||
|
Clear()
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Upgrade from v4 to v5
|
||||||
|
```diff
|
||||||
|
// dataloader.Interface now uses the Key type as the key rather than interface{}
|
||||||
|
- loader.Load(context.Context, key interface{}) Thunk
|
||||||
|
+ loader.Load(ctx context.Context, key Key) Thunk
|
||||||
|
- loader.LoadMany(context.Context, key []interface{}) ThunkMany
|
||||||
|
+ loader.LoadMany(ctx context.Context, keys Keys) ThunkMany
|
||||||
|
- loader.Prime(context.Context, key interface{}, value interface{}) Interface
|
||||||
|
+ loader.Prime(ctx context.Context, key Key, value interface{}) Interface
|
||||||
|
- loader.Clear(context.Context, key interface{}) Interface
|
||||||
|
+ loader.Clear(ctx context.Context, key Key) Interface
|
||||||
|
```
|
||||||
|
|
||||||
|
```diff
|
||||||
|
// cache interface now uses the Key type as the key instead of interface{}
|
||||||
|
type Cache interface {
|
||||||
|
- Get(context.Context, interface{}) (Thunk, bool)
|
||||||
|
+ Get(context.Context, Key) (Thunk, bool)
|
||||||
|
- Set(context.Context, interface{}, Thunk)
|
||||||
|
+ Set(context.Context, Key, Thunk)
|
||||||
|
- Delete(context.Context, interface{}) bool
|
||||||
|
+ Delete(context.Context, Key) bool
|
||||||
|
Clear()
|
||||||
|
}
|
||||||
|
```
|
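The v4→v5 change above means call sites must wrap plain strings in a `Key` implementation. A minimal sketch of an updated caller, assuming the vendored import path; `fetchUser` is a hypothetical helper, not part of the library:

```go
package main

import (
	"context"
	"fmt"

	"github.com/graph-gophers/dataloader"
)

// fetchUser is a hypothetical helper showing the v5 call shape:
// plain strings are wrapped in dataloader.StringKey so they satisfy Key.
func fetchUser(ctx context.Context, loader *dataloader.Loader, id string) (interface{}, error) {
	thunk := loader.Load(ctx, dataloader.StringKey(id)) // v4 took a bare string here
	return thunk()
}

func main() {
	batchFn := func(ctx context.Context, keys dataloader.Keys) []*dataloader.Result {
		results := make([]*dataloader.Result, 0, len(keys))
		for _, k := range keys {
			results = append(results, &dataloader.Result{Data: "user-" + k.String()})
		}
		return results
	}
	loader := dataloader.NewBatchedLoader(batchFn)
	user, err := fetchUser(context.Background(), loader, "42")
	fmt.Println(user, err)
}
```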
48
src/vendor/github.com/graph-gophers/dataloader/README.md
generated
vendored
Normal file
@ -0,0 +1,48 @@
|
|||||||
|
# DataLoader
|
||||||
|
[![GoDoc](https://godoc.org/gopkg.in/graph-gophers/dataloader.v3?status.svg)](https://godoc.org/github.com/graph-gophers/dataloader)
|
||||||
|
[![Build Status](https://travis-ci.org/graph-gophers/dataloader.svg?branch=master)](https://travis-ci.org/graph-gophers/dataloader)
|
||||||
|
|
||||||
|
This is an implementation of [Facebook's DataLoader](https://github.com/facebook/dataloader) in Golang.
|
||||||
|
|
||||||
|
## Install
|
||||||
|
`go get -u github.com/graph-gophers/dataloader`
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
```go
|
||||||
|
// setup batch function
|
||||||
|
batchFn := func(ctx context.Context, keys dataloader.Keys) []*dataloader.Result {
|
||||||
|
var results []*dataloader.Result
|
||||||
|
// do some async work to get data for the specified keys
|
||||||
|
// append the resolved values to this list
|
||||||
|
return results
|
||||||
|
}
|
||||||
|
|
||||||
|
// create Loader with an in-memory cache
|
||||||
|
loader := dataloader.NewBatchedLoader(batchFn)
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Use loader
|
||||||
|
*
|
||||||
|
* A thunk is a function returned from a function that is a
|
||||||
|
* closure over a value (in this case an interface value and error).
|
||||||
|
* When called, it will block until the value is resolved.
|
||||||
|
*/
|
||||||
|
thunk := loader.Load(context.TODO(), dataloader.StringKey("key1")) // StringKey is a convenience type that wraps a string to implement the `Key` interface
|
||||||
|
result, err := thunk()
|
||||||
|
if err != nil {
|
||||||
|
// handle data error
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("value: %#v", result)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Don't need/want to use context?
|
||||||
|
You're welcome to install the v1 version of this library.
|
||||||
|
|
||||||
|
## Cache
|
||||||
|
This implementation contains a very basic cache that is intended only to be used for short lived DataLoaders (i.e. DataLoaders that only exist for the life of an HTTP request). You may use your own implementation if you want.
|
||||||
|
|
||||||
|
> It also ships a `NoCache` type that implements the cache interface with all methods as no-ops, for when you do not wish to cache anything.
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
There are a few basic examples in the example folder.
|
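The cache notes above mention `NoCache` but not how to enable it. A minimal fragment, assuming the `batchFn`, imports, and variable names from the usage example above, that disables memoization via the `WithCache` option:

```go
// Disable caching entirely: every Load reaches the batch function,
// so only request batching (not memoization) is performed.
loader := dataloader.NewBatchedLoader(
	batchFn,
	dataloader.WithCache(&dataloader.NoCache{}),
)

thunk := loader.Load(context.TODO(), dataloader.StringKey("key1"))
value, err := thunk() // handle value / err as in the example above
```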
28
src/vendor/github.com/graph-gophers/dataloader/cache.go
generated
vendored
Normal file
@ -0,0 +1,28 @@
|
|||||||
|
package dataloader
|
||||||
|
|
||||||
|
import "context"
|
||||||
|
|
||||||
|
// The Cache interface. If a custom cache is provided, it must implement this interface.
|
||||||
|
type Cache interface {
|
||||||
|
Get(context.Context, Key) (Thunk, bool)
|
||||||
|
Set(context.Context, Key, Thunk)
|
||||||
|
Delete(context.Context, Key) bool
|
||||||
|
Clear()
|
||||||
|
}
|
||||||
|
|
||||||
|
// NoCache implements Cache interface where all methods are noops.
|
||||||
|
// This is useful for when you don't want to cache items but still
|
||||||
|
// want to use a data loader
|
||||||
|
type NoCache struct{}
|
||||||
|
|
||||||
|
// Get is a NOOP
|
||||||
|
func (c *NoCache) Get(context.Context, Key) (Thunk, bool) { return nil, false }
|
||||||
|
|
||||||
|
// Set is a NOOP
|
||||||
|
func (c *NoCache) Set(context.Context, Key, Thunk) { return }
|
||||||
|
|
||||||
|
// Delete is a NOOP
|
||||||
|
func (c *NoCache) Delete(context.Context, Key) bool { return false }
|
||||||
|
|
||||||
|
// Clear is a NOOP
|
||||||
|
func (c *NoCache) Clear() { return }
|
26
src/vendor/github.com/graph-gophers/dataloader/codecov.yml
generated
vendored
Normal file
@ -0,0 +1,26 @@
|
|||||||
|
codecov:
|
||||||
|
notify:
|
||||||
|
require_ci_to_pass: true
|
||||||
|
comment:
|
||||||
|
behavior: default
|
||||||
|
layout: header, diff
|
||||||
|
require_changes: false
|
||||||
|
coverage:
|
||||||
|
precision: 2
|
||||||
|
range:
|
||||||
|
- 70.0
|
||||||
|
- 100.0
|
||||||
|
round: down
|
||||||
|
status:
|
||||||
|
changes: false
|
||||||
|
patch: true
|
||||||
|
project: true
|
||||||
|
parsers:
|
||||||
|
gcov:
|
||||||
|
branch_detection:
|
||||||
|
conditional: true
|
||||||
|
loop: true
|
||||||
|
macro: false
|
||||||
|
method: false
|
||||||
|
javascript:
|
||||||
|
enable_partials: false
|
492
src/vendor/github.com/graph-gophers/dataloader/dataloader.go
generated
vendored
Normal file
@ -0,0 +1,492 @@
|
|||||||
|
// Package dataloader is an implementation of Facebook's dataloader in Go.
|
||||||
|
// See https://github.com/facebook/dataloader for more information
|
||||||
|
package dataloader
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"runtime"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Interface is a `DataLoader` Interface which defines a public API for loading data from a particular
|
||||||
|
// data back-end with unique keys such as the `id` column of a SQL table or
|
||||||
|
// document name in a MongoDB database, given a batch loading function.
|
||||||
|
//
|
||||||
|
// Each `DataLoader` instance should contain a unique memoized cache. Use caution when
|
||||||
|
// used in long-lived applications or those which serve many users with
|
||||||
|
// different access permissions and consider creating a new instance per
|
||||||
|
// web request.
|
||||||
|
type Interface interface {
|
||||||
|
Load(context.Context, Key) Thunk
|
||||||
|
LoadMany(context.Context, Keys) ThunkMany
|
||||||
|
Clear(context.Context, Key) Interface
|
||||||
|
ClearAll() Interface
|
||||||
|
Prime(ctx context.Context, key Key, value interface{}) Interface
|
||||||
|
}
|
||||||
|
|
||||||
|
// BatchFunc is a function which, when given a slice of keys, returns a slice of `results`.
|
||||||
|
// It's important that the length of the input keys matches the length of the output results.
|
||||||
|
//
|
||||||
|
// The keys passed to this function are guaranteed to be unique
|
||||||
|
type BatchFunc func(context.Context, Keys) []*Result
|
||||||
|
|
||||||
|
// Result is the data structure that a BatchFunc returns.
|
||||||
|
// It contains the resolved data, and any errors that may have occurred while fetching the data.
|
||||||
|
type Result struct {
|
||||||
|
Data interface{}
|
||||||
|
Error error
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResultMany is used by the LoadMany method.
|
||||||
|
// It contains a list of resolved data and a list of errors.
|
||||||
|
// The lengths of the data list and error list will match, and elements at each index correspond to each other.
|
||||||
|
type ResultMany struct {
|
||||||
|
Data []interface{}
|
||||||
|
Error []error
|
||||||
|
}
|
||||||
|
|
||||||
|
// Loader implements the dataloader.Interface.
|
||||||
|
type Loader struct {
|
||||||
|
// the batch function to be used by this loader
|
||||||
|
batchFn BatchFunc
|
||||||
|
|
||||||
|
// the maximum batch size. Set to 0 if you want it to be unbounded.
|
||||||
|
batchCap int
|
||||||
|
|
||||||
|
// the internal cache. This package contains a basic cache implementation, but any custom cache
|
||||||
|
// implementation could be used as long as it implements the `Cache` interface.
|
||||||
|
cacheLock sync.Mutex
|
||||||
|
cache Cache
|
||||||
|
// should we clear the cache on each batch?
|
||||||
|
// this would allow batching but no long term caching
|
||||||
|
clearCacheOnBatch bool
|
||||||
|
|
||||||
|
// count of queued up items
|
||||||
|
count int
|
||||||
|
|
||||||
|
// the maximum input queue size. Set to 0 if you want it to be unbounded.
|
||||||
|
inputCap int
|
||||||
|
|
||||||
|
// the amount of time to wait before triggering a batch
|
||||||
|
wait time.Duration
|
||||||
|
|
||||||
|
// lock to protect the batching operations
|
||||||
|
batchLock sync.Mutex
|
||||||
|
|
||||||
|
// current batcher
|
||||||
|
curBatcher *batcher
|
||||||
|
|
||||||
|
// used to close the sleeper of the current batcher
|
||||||
|
endSleeper chan bool
|
||||||
|
|
||||||
|
// used by tests to prevent logs
|
||||||
|
silent bool
|
||||||
|
|
||||||
|
// can be set to trace calls to dataloader
|
||||||
|
tracer Tracer
|
||||||
|
}
|
||||||
|
|
||||||
|
// Thunk is a function that will block until the value (*Result) it contains is resolved.
|
||||||
|
// After the value it contains is resolved, this function will return the result.
|
||||||
|
// This function can be called many times, much like a Promise in other languages.
|
||||||
|
// The value will only need to be resolved once so subsequent calls will return immediately.
|
||||||
|
type Thunk func() (interface{}, error)
|
||||||
|
|
||||||
|
// ThunkMany is much like the Thunk func type but it contains a list of results.
|
||||||
|
type ThunkMany func() ([]interface{}, []error)
|
||||||
|
|
||||||
|
// type used on the input channel
|
||||||
|
type batchRequest struct {
|
||||||
|
key Key
|
||||||
|
channel chan *Result
|
||||||
|
}
|
||||||
|
|
||||||
|
// Option allows for configuration of Loader fields.
|
||||||
|
type Option func(*Loader)
|
||||||
|
|
||||||
|
// WithCache sets the BatchedLoader cache. Defaults to InMemoryCache if a Cache is not set.
|
||||||
|
func WithCache(c Cache) Option {
|
||||||
|
return func(l *Loader) {
|
||||||
|
l.cache = c
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithBatchCapacity sets the batch capacity. Default is 0 (unbounded).
|
||||||
|
func WithBatchCapacity(c int) Option {
|
||||||
|
return func(l *Loader) {
|
||||||
|
l.batchCap = c
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithInputCapacity sets the input capacity. Default is 1000.
|
||||||
|
func WithInputCapacity(c int) Option {
|
||||||
|
return func(l *Loader) {
|
||||||
|
l.inputCap = c
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithWait sets the amount of time to wait before triggering a batch.
|
||||||
|
// Default duration is 16 milliseconds.
|
||||||
|
func WithWait(d time.Duration) Option {
|
||||||
|
return func(l *Loader) {
|
||||||
|
l.wait = d
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithClearCacheOnBatch allows batching of items but no long term caching.
|
||||||
|
// It accomplishes this by clearing the cache after each batch operation.
|
||||||
|
func WithClearCacheOnBatch() Option {
|
||||||
|
return func(l *Loader) {
|
||||||
|
l.cacheLock.Lock()
|
||||||
|
l.clearCacheOnBatch = true
|
||||||
|
l.cacheLock.Unlock()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// withSilentLogger turns off log messages. It's used by the tests
|
||||||
|
func withSilentLogger() Option {
|
||||||
|
return func(l *Loader) {
|
||||||
|
l.silent = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithTracer allows tracing of calls to Load and LoadMany
|
||||||
|
func WithTracer(tracer Tracer) Option {
|
||||||
|
return func(l *Loader) {
|
||||||
|
l.tracer = tracer
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithOpenTracingTracer allows tracing of calls to Load and LoadMany
|
||||||
|
func WithOpenTracingTracer() Option {
|
||||||
|
return WithTracer(&OpenTracingTracer{})
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBatchedLoader constructs a new Loader with given options.
|
||||||
|
func NewBatchedLoader(batchFn BatchFunc, opts ...Option) *Loader {
|
||||||
|
loader := &Loader{
|
||||||
|
batchFn: batchFn,
|
||||||
|
inputCap: 1000,
|
||||||
|
wait: 16 * time.Millisecond,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Apply options
|
||||||
|
for _, apply := range opts {
|
||||||
|
apply(loader)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set defaults
|
||||||
|
if loader.cache == nil {
|
||||||
|
loader.cache = NewCache()
|
||||||
|
}
|
||||||
|
|
||||||
|
if loader.tracer == nil {
|
||||||
|
loader.tracer = &NoopTracer{}
|
||||||
|
}
|
||||||
|
|
||||||
|
return loader
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load loads/resolves the given key, returning a Thunk that will resolve to the value and error
|
||||||
|
func (l *Loader) Load(originalContext context.Context, key Key) Thunk {
|
||||||
|
ctx, finish := l.tracer.TraceLoad(originalContext, key)
|
||||||
|
|
||||||
|
c := make(chan *Result, 1)
|
||||||
|
var result struct {
|
||||||
|
mu sync.RWMutex
|
||||||
|
value *Result
|
||||||
|
}
|
||||||
|
|
||||||
|
// lock to prevent duplicate keys coming in before item has been added to cache.
|
||||||
|
l.cacheLock.Lock()
|
||||||
|
if v, ok := l.cache.Get(ctx, key); ok {
|
||||||
|
defer finish(v)
|
||||||
|
defer l.cacheLock.Unlock()
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
thunk := func() (interface{}, error) {
|
||||||
|
result.mu.RLock()
|
||||||
|
resultNotSet := result.value == nil
|
||||||
|
result.mu.RUnlock()
|
||||||
|
|
||||||
|
if resultNotSet {
|
||||||
|
result.mu.Lock()
|
||||||
|
if v, ok := <-c; ok {
|
||||||
|
result.value = v
|
||||||
|
}
|
||||||
|
result.mu.Unlock()
|
||||||
|
}
|
||||||
|
result.mu.RLock()
|
||||||
|
defer result.mu.RUnlock()
|
||||||
|
return result.value.Data, result.value.Error
|
||||||
|
}
|
||||||
|
defer finish(thunk)
|
||||||
|
|
||||||
|
l.cache.Set(ctx, key, thunk)
|
||||||
|
l.cacheLock.Unlock()
|
||||||
|
|
||||||
|
// this is sent to batch fn. It contains the key and the channel to return the
|
||||||
|
// result on
|
||||||
|
req := &batchRequest{key, c}
|
||||||
|
|
||||||
|
l.batchLock.Lock()
|
||||||
|
// start the batch window if it hasn't already started.
|
||||||
|
if l.curBatcher == nil {
|
||||||
|
l.curBatcher = l.newBatcher(l.silent, l.tracer)
|
||||||
|
// start the current batcher batch function
|
||||||
|
go l.curBatcher.batch(originalContext)
|
||||||
|
// start a sleeper for the current batcher
|
||||||
|
l.endSleeper = make(chan bool)
|
||||||
|
go l.sleeper(l.curBatcher, l.endSleeper)
|
||||||
|
}
|
||||||
|
|
||||||
|
l.curBatcher.input <- req
|
||||||
|
|
||||||
|
// if we need to keep track of the count (max batch), then do so.
|
||||||
|
if l.batchCap > 0 {
|
||||||
|
l.count++
|
||||||
|
// if we hit our limit, force the batch to start
|
||||||
|
if l.count == l.batchCap {
|
||||||
|
// end the batcher synchronously here because another call to Load
|
||||||
|
// may concurrently happen and needs to go to a new batcher.
|
||||||
|
l.curBatcher.end()
|
||||||
|
// end the sleeper for the current batcher.
|
||||||
|
// this is to stop the goroutine without waiting for the
|
||||||
|
// sleeper timeout.
|
||||||
|
close(l.endSleeper)
|
||||||
|
l.reset()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
l.batchLock.Unlock()
|
||||||
|
|
||||||
|
return thunk
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadMany loads multiple keys, returning a thunk (type: ThunkMany) that will resolve the keys passed in.
|
||||||
|
func (l *Loader) LoadMany(originalContext context.Context, keys Keys) ThunkMany {
|
||||||
|
ctx, finish := l.tracer.TraceLoadMany(originalContext, keys)
|
||||||
|
|
||||||
|
var (
|
||||||
|
length = len(keys)
|
||||||
|
data = make([]interface{}, length)
|
||||||
|
errors = make([]error, length)
|
||||||
|
c = make(chan *ResultMany, 1)
|
||||||
|
wg sync.WaitGroup
|
||||||
|
)
|
||||||
|
|
||||||
|
resolve := func(ctx context.Context, i int) {
|
||||||
|
defer wg.Done()
|
||||||
|
thunk := l.Load(ctx, keys[i])
|
||||||
|
result, err := thunk()
|
||||||
|
data[i] = result
|
||||||
|
errors[i] = err
|
||||||
|
}
|
||||||
|
|
||||||
|
wg.Add(length)
|
||||||
|
for i := range keys {
|
||||||
|
go resolve(ctx, i)
|
||||||
|
}
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
// errs is nil unless there exists a non-nil error.
|
||||||
|
// This prevents dataloader from returning a slice of all-nil errors.
|
||||||
|
var errs []error
|
||||||
|
for _, e := range errors {
|
||||||
|
if e != nil {
|
||||||
|
errs = errors
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
c <- &ResultMany{Data: data, Error: errs}
|
||||||
|
close(c)
|
||||||
|
}()
|
||||||
|
|
||||||
|
var result struct {
|
||||||
|
mu sync.RWMutex
|
||||||
|
value *ResultMany
|
||||||
|
}
|
||||||
|
|
||||||
|
thunkMany := func() ([]interface{}, []error) {
|
||||||
|
result.mu.RLock()
|
||||||
|
resultNotSet := result.value == nil
|
||||||
|
result.mu.RUnlock()
|
||||||
|
|
||||||
|
if resultNotSet {
|
||||||
|
result.mu.Lock()
|
||||||
|
if v, ok := <-c; ok {
|
||||||
|
result.value = v
|
||||||
|
}
|
||||||
|
result.mu.Unlock()
|
||||||
|
}
|
||||||
|
result.mu.RLock()
|
||||||
|
defer result.mu.RUnlock()
|
||||||
|
return result.value.Data, result.value.Error
|
||||||
|
}
|
||||||
|
|
||||||
|
defer finish(thunkMany)
|
||||||
|
return thunkMany
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clear clears the value at `key` from the cache, if it exists. Returns self for method chaining
|
||||||
|
func (l *Loader) Clear(ctx context.Context, key Key) Interface {
|
||||||
|
l.cacheLock.Lock()
|
||||||
|
l.cache.Delete(ctx, key)
|
||||||
|
l.cacheLock.Unlock()
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearAll clears the entire cache. To be used when some event results in unknown invalidations.
|
||||||
|
// Returns self for method chaining.
|
||||||
|
func (l *Loader) ClearAll() Interface {
|
||||||
|
l.cacheLock.Lock()
|
||||||
|
l.cache.Clear()
|
||||||
|
l.cacheLock.Unlock()
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prime adds the provided key and value to the cache. If the key already exists, no change is made.
|
||||||
|
// Returns self for method chaining
|
||||||
|
func (l *Loader) Prime(ctx context.Context, key Key, value interface{}) Interface {
|
||||||
|
if _, ok := l.cache.Get(ctx, key); !ok {
|
||||||
|
thunk := func() (interface{}, error) {
|
||||||
|
return value, nil
|
||||||
|
}
|
||||||
|
l.cache.Set(ctx, key, thunk)
|
||||||
|
}
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *Loader) reset() {
|
||||||
|
l.count = 0
|
||||||
|
l.curBatcher = nil
|
||||||
|
|
||||||
|
if l.clearCacheOnBatch {
|
||||||
|
l.cache.Clear()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type batcher struct {
|
||||||
|
input chan *batchRequest
|
||||||
|
batchFn BatchFunc
|
||||||
|
finished bool
|
||||||
|
silent bool
|
||||||
|
tracer Tracer
|
||||||
|
}
|
||||||
|
|
||||||
|
// newBatcher returns a batcher for the current requests
|
||||||
|
// all the batcher methods must be protected by a global batchLock
|
||||||
|
func (l *Loader) newBatcher(silent bool, tracer Tracer) *batcher {
|
||||||
|
return &batcher{
|
||||||
|
input: make(chan *batchRequest, l.inputCap),
|
||||||
|
batchFn: l.batchFn,
|
||||||
|
silent: silent,
|
||||||
|
tracer: tracer,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// stop receiving input and process batch function
|
||||||
|
func (b *batcher) end() {
|
||||||
|
if !b.finished {
|
||||||
|
close(b.input)
|
||||||
|
b.finished = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// execute the batch of all items in queue
|
||||||
|
func (b *batcher) batch(originalContext context.Context) {
|
||||||
|
var (
|
||||||
|
keys = make(Keys, 0)
|
||||||
|
reqs = make([]*batchRequest, 0)
|
||||||
|
items = make([]*Result, 0)
|
||||||
|
panicErr interface{}
|
||||||
|
)
|
||||||
|
|
||||||
|
for item := range b.input {
|
||||||
|
keys = append(keys, item.key)
|
||||||
|
reqs = append(reqs, item)
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, finish := b.tracer.TraceBatch(originalContext, keys)
|
||||||
|
defer finish(items)
|
||||||
|
|
||||||
|
func() {
|
||||||
|
defer func() {
|
||||||
|
if r := recover(); r != nil {
|
||||||
|
panicErr = r
|
||||||
|
if b.silent {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
const size = 64 << 10
|
||||||
|
buf := make([]byte, size)
|
||||||
|
buf = buf[:runtime.Stack(buf, false)]
|
||||||
|
log.Printf("Dataloader: Panic received in batch function:: %v\n%s", panicErr, buf)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
items = b.batchFn(ctx, keys)
|
||||||
|
}()
|
||||||
|
|
||||||
|
if panicErr != nil {
|
||||||
|
for _, req := range reqs {
|
||||||
|
req.channel <- &Result{Error: fmt.Errorf("Panic received in batch function: %v", panicErr)}
|
||||||
|
close(req.channel)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(items) != len(keys) {
|
||||||
|
err := &Result{Error: fmt.Errorf(`
|
||||||
|
The batch function supplied did not return an array of responses
|
||||||
|
the same length as the array of keys.
|
||||||
|
|
||||||
|
Keys:
|
||||||
|
%v
|
||||||
|
|
||||||
|
Values:
|
||||||
|
%v
|
||||||
|
`, keys, items)}
|
||||||
|
|
||||||
|
for _, req := range reqs {
|
||||||
|
req.channel <- err
|
||||||
|
close(req.channel)
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, req := range reqs {
|
||||||
|
req.channel <- items[i]
|
||||||
|
close(req.channel)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// wait the appropriate amount of time for the provided batcher
|
||||||
|
func (l *Loader) sleeper(b *batcher, close chan bool) {
|
||||||
|
select {
|
||||||
|
// used by batch to close early. usually triggered by max batch size
|
||||||
|
case <-close:
|
||||||
|
return
|
||||||
|
// otherwise wait for the batch window (l.wait) to elapse
|
||||||
|
case <-time.After(l.wait):
|
||||||
|
}
|
||||||
|
|
||||||
|
// reset
|
||||||
|
// this is protected by the batchLock to avoid closing the batcher input
|
||||||
|
// channel while Load is inserting a request
|
||||||
|
l.batchLock.Lock()
|
||||||
|
b.end()
|
||||||
|
|
||||||
|
// We can end here also if the batcher has already been closed and a
|
||||||
|
// new one has been created. So reset the loader state only if the batcher
|
||||||
|
// is the current one
|
||||||
|
if l.curBatcher == b {
|
||||||
|
l.reset()
|
||||||
|
}
|
||||||
|
l.batchLock.Unlock()
|
||||||
|
}
|
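A short sketch of how the options defined above shape the batching behaviour; `ctx` and `batchFn` are assumed to exist as in the README example, the `time` package must be imported, and the numbers are illustrative:

```go
// With a 5ms window and a capacity of 100, a batch is dispatched as soon
// as 100 keys are queued, or when the window elapses, whichever is first.
loader := dataloader.NewBatchedLoader(
	batchFn,
	dataloader.WithWait(5*time.Millisecond),
	dataloader.WithBatchCapacity(100),
	dataloader.WithClearCacheOnBatch(), // batch only, no long-term memoization
)

// Loads issued within the same window are collapsed into one batchFn call.
thunkA := loader.Load(ctx, dataloader.StringKey("a"))
thunkB := loader.Load(ctx, dataloader.StringKey("b"))
a, _ := thunkA()
b, _ := thunkB()
```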
65
src/vendor/github.com/graph-gophers/dataloader/inMemoryCache.go
generated
vendored
Normal file
@ -0,0 +1,65 @@
|
|||||||
|
// +build !go1.9
|
||||||
|
|
||||||
|
package dataloader
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// InMemoryCache is an in memory implementation of Cache interface.
|
||||||
|
// this simple implementation is well suited for
|
||||||
|
// a "per-request" dataloader (i.e. one that only lives
|
||||||
|
// for the life of an http request) but it is not well suited
|
||||||
|
// for long lived cached items.
|
||||||
|
type InMemoryCache struct {
|
||||||
|
items map[string]Thunk
|
||||||
|
mu sync.RWMutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCache constructs a new InMemoryCache
|
||||||
|
func NewCache() *InMemoryCache {
|
||||||
|
items := make(map[string]Thunk)
|
||||||
|
return &InMemoryCache{
|
||||||
|
items: items,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set sets the `value` at `key` in the cache
|
||||||
|
func (c *InMemoryCache) Set(_ context.Context, key Key, value Thunk) {
|
||||||
|
c.mu.Lock()
|
||||||
|
c.items[key.String()] = value
|
||||||
|
c.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get gets the value at `key` if it exists, returning the value (or nil) and a bool
|
||||||
|
// indicating whether the value was found
|
||||||
|
func (c *InMemoryCache) Get(_ context.Context, key Key) (Thunk, bool) {
|
||||||
|
c.mu.RLock()
|
||||||
|
defer c.mu.RUnlock()
|
||||||
|
|
||||||
|
item, found := c.items[key.String()]
|
||||||
|
if !found {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
return item, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete deletes item at `key` from cache
|
||||||
|
func (c *InMemoryCache) Delete(ctx context.Context, key Key) bool {
|
||||||
|
if _, found := c.Get(ctx, key); found {
|
||||||
|
c.mu.Lock()
|
||||||
|
defer c.mu.Unlock()
|
||||||
|
delete(c.items, key.String())
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clear clears the entire cache
|
||||||
|
func (c *InMemoryCache) Clear() {
|
||||||
|
c.mu.Lock()
|
||||||
|
c.items = map[string]Thunk{}
|
||||||
|
c.mu.Unlock()
|
||||||
|
}
|
57
src/vendor/github.com/graph-gophers/dataloader/inMemoryCache_go19.go
generated
vendored
Normal file
@ -0,0 +1,57 @@
|
|||||||
|
// +build go1.9
|
||||||
|
|
||||||
|
package dataloader
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// InMemoryCache is an in memory implementation of Cache interface.
|
||||||
|
// this simple implementation is well suited for
|
||||||
|
// a "per-request" dataloader (i.e. one that only lives
|
||||||
|
// for the life of an http request) but it is not well suited
|
||||||
|
// for long lived cached items.
|
||||||
|
type InMemoryCache struct {
|
||||||
|
items *sync.Map
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCache constructs a new InMemoryCache
|
||||||
|
func NewCache() *InMemoryCache {
|
||||||
|
return &InMemoryCache{
|
||||||
|
items: &sync.Map{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set sets the `value` at `key` in the cache
|
||||||
|
func (c *InMemoryCache) Set(_ context.Context, key Key, value Thunk) {
|
||||||
|
c.items.Store(key.String(), value)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get gets the value at `key` if it exists, returning the value (or nil) and a bool
|
||||||
|
// indicating whether the value was found
|
||||||
|
func (c *InMemoryCache) Get(_ context.Context, key Key) (Thunk, bool) {
|
||||||
|
item, found := c.items.Load(key.String())
|
||||||
|
if !found {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
return item.(Thunk), true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete deletes item at `key` from cache
|
||||||
|
func (c *InMemoryCache) Delete(_ context.Context, key Key) bool {
|
||||||
|
if _, found := c.items.Load(key.String()); found {
|
||||||
|
c.items.Delete(key.String())
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clear clears the entire cache
|
||||||
|
func (c *InMemoryCache) Clear() {
|
||||||
|
c.items.Range(func(key, _ interface{}) bool {
|
||||||
|
c.items.Delete(key)
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
}
|
39
src/vendor/github.com/graph-gophers/dataloader/key.go
generated
vendored
Normal file
@ -0,0 +1,39 @@
|
|||||||
|
package dataloader
|
||||||
|
|
||||||
|
// Key is the interface that all keys need to implement
|
||||||
|
type Key interface {
|
||||||
|
// String returns a guaranteed unique string that can be used to identify an object
|
||||||
|
String() string
|
||||||
|
// Raw returns the raw, underlying value of the key
|
||||||
|
Raw() interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Keys wraps a slice of Key types to provide some convenience methods.
|
||||||
|
type Keys []Key
|
||||||
|
|
||||||
|
// Keys returns the list of strings. One for each "Key" in the list
|
||||||
|
func (l Keys) Keys() []string {
|
||||||
|
list := make([]string, len(l))
|
||||||
|
for i := range l {
|
||||||
|
list[i] = l[i].String()
|
||||||
|
}
|
||||||
|
return list
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringKey implements the Key interface for a string
|
||||||
|
type StringKey string
|
||||||
|
|
||||||
|
// String is an identity method. Used to implement String interface
|
||||||
|
func (k StringKey) String() string { return string(k) }
|
||||||
|
|
||||||
|
// Raw is an identity method. Used to implement the Key interface's Raw method
|
||||||
|
func (k StringKey) Raw() interface{} { return k }
|
||||||
|
|
||||||
|
// NewKeysFromStrings converts a `[]string` to a `Keys` ([]Key)
|
||||||
|
func NewKeysFromStrings(strings []string) Keys {
|
||||||
|
list := make(Keys, len(strings))
|
||||||
|
for i := range strings {
|
||||||
|
list[i] = StringKey(strings[i])
|
||||||
|
}
|
||||||
|
return list
|
||||||
|
}
|
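Keys do not have to be strings. A minimal sketch of a custom integer key satisfying the interface above; `UserIDKey` is a hypothetical name and `strconv` must be imported:

```go
// UserIDKey is a hypothetical Key implementation for integer IDs.
type UserIDKey int

// String returns a unique string form of the ID, used for cache lookups.
func (k UserIDKey) String() string { return strconv.Itoa(int(k)) }

// Raw returns the underlying integer, handy inside the batch function.
func (k UserIDKey) Raw() interface{} { return int(k) }

// Usage: loader.Load(ctx, UserIDKey(42)); inside the batch function the
// typed value can be recovered with keys[i].Raw().(int).
```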
78
src/vendor/github.com/graph-gophers/dataloader/trace.go
generated
vendored
Normal file
@ -0,0 +1,78 @@
|
|||||||
|
package dataloader
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
opentracing "github.com/opentracing/opentracing-go"
|
||||||
|
)
|
||||||
|
|
||||||
|
type TraceLoadFinishFunc func(Thunk)
|
||||||
|
type TraceLoadManyFinishFunc func(ThunkMany)
|
||||||
|
type TraceBatchFinishFunc func([]*Result)
|
||||||
|
|
||||||
|
// Tracer is an interface that may be used to implement tracing.
|
||||||
|
type Tracer interface {
|
||||||
|
// TraceLoad will trace the calls to Load
|
||||||
|
TraceLoad(ctx context.Context, key Key) (context.Context, TraceLoadFinishFunc)
|
||||||
|
// TraceLoadMany will trace the calls to LoadMany
|
||||||
|
TraceLoadMany(ctx context.Context, keys Keys) (context.Context, TraceLoadManyFinishFunc)
|
||||||
|
// TraceBatch will trace data loader batches
|
||||||
|
TraceBatch(ctx context.Context, keys Keys) (context.Context, TraceBatchFinishFunc)
|
||||||
|
}
|
||||||
|
|
||||||
|
// OpenTracingTracer implements a tracer that can be used with the OpenTracing standard.
|
||||||
|
type OpenTracingTracer struct{}
|
||||||
|
|
||||||
|
// TraceLoad will trace a call to dataloader.Load with OpenTracing
|
||||||
|
func (OpenTracingTracer) TraceLoad(ctx context.Context, key Key) (context.Context, TraceLoadFinishFunc) {
|
||||||
|
span, spanCtx := opentracing.StartSpanFromContext(ctx, "Dataloader: load")
|
||||||
|
|
||||||
|
span.SetTag("dataloader.key", key.String())
|
||||||
|
|
||||||
|
return spanCtx, func(thunk Thunk) {
|
||||||
|
// TODO: is there anything we should do with the results?
|
||||||
|
span.Finish()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TraceLoadMany will trace a call to dataloader.LoadMany with Open Tracing
|
||||||
|
func (OpenTracingTracer) TraceLoadMany(ctx context.Context, keys Keys) (context.Context, TraceLoadManyFinishFunc) {
|
||||||
|
span, spanCtx := opentracing.StartSpanFromContext(ctx, "Dataloader: loadmany")
|
||||||
|
|
||||||
|
span.SetTag("dataloader.keys", keys.Keys())
|
||||||
|
|
||||||
|
return spanCtx, func(thunk ThunkMany) {
|
||||||
|
// TODO: is there anything we should do with the results?
|
||||||
|
span.Finish()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TraceBatch will trace a dataloader batch function call with OpenTracing
|
||||||
|
func (OpenTracingTracer) TraceBatch(ctx context.Context, keys Keys) (context.Context, TraceBatchFinishFunc) {
|
||||||
|
span, spanCtx := opentracing.StartSpanFromContext(ctx, "Dataloader: batch")
|
||||||
|
|
||||||
|
span.SetTag("dataloader.keys", keys.Keys())
|
||||||
|
|
||||||
|
return spanCtx, func(results []*Result) {
|
||||||
|
// TODO: is there anything we should do with the results?
|
||||||
|
span.Finish()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NoopTracer is the default (noop) tracer
|
||||||
|
type NoopTracer struct{}
|
||||||
|
|
||||||
|
// TraceLoad is a noop function
|
||||||
|
func (NoopTracer) TraceLoad(ctx context.Context, key Key) (context.Context, TraceLoadFinishFunc) {
|
||||||
|
return ctx, func(Thunk) {}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TraceLoadMany is a noop function
|
||||||
|
func (NoopTracer) TraceLoadMany(ctx context.Context, keys Keys) (context.Context, TraceLoadManyFinishFunc) {
|
||||||
|
return ctx, func(ThunkMany) {}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TraceBatch is a noop function
|
||||||
|
func (NoopTracer) TraceBatch(ctx context.Context, keys Keys) (context.Context, TraceBatchFinishFunc) {
|
||||||
|
return ctx, func(result []*Result) {}
|
||||||
|
}
|
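Any type with the three methods above can be plugged in via `WithTracer`. A minimal sketch of a logging tracer, assuming imports of `context`, `log`, and the vendored `dataloader` package; `LogTracer` is a hypothetical name:

```go
// LogTracer logs each phase of a dataloader call; it satisfies Tracer.
type LogTracer struct{}

func (LogTracer) TraceLoad(ctx context.Context, key dataloader.Key) (context.Context, dataloader.TraceLoadFinishFunc) {
	log.Printf("dataloader: load %s", key.String())
	return ctx, func(dataloader.Thunk) { log.Printf("dataloader: load finished %s", key.String()) }
}

func (LogTracer) TraceLoadMany(ctx context.Context, keys dataloader.Keys) (context.Context, dataloader.TraceLoadManyFinishFunc) {
	log.Printf("dataloader: loadMany %v", keys.Keys())
	return ctx, func(dataloader.ThunkMany) { log.Printf("dataloader: loadMany finished") }
}

func (LogTracer) TraceBatch(ctx context.Context, keys dataloader.Keys) (context.Context, dataloader.TraceBatchFinishFunc) {
	log.Printf("dataloader: batching %d keys", len(keys))
	return ctx, func(results []*dataloader.Result) { log.Printf("dataloader: batch returned %d results", len(results)) }
}

// Wire it in with: dataloader.NewBatchedLoader(batchFn, dataloader.WithTracer(LogTracer{}))
```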
1
src/vendor/github.com/opentracing/opentracing-go/.gitignore
generated
vendored
Normal file
@ -0,0 +1 @@
|
|||||||
|
coverage.txt
|
20
src/vendor/github.com/opentracing/opentracing-go/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
language: go
|
||||||
|
|
||||||
|
matrix:
|
||||||
|
include:
|
||||||
|
- go: "1.11.x"
|
||||||
|
- go: "1.12.x"
|
||||||
|
- go: "tip"
|
||||||
|
env:
|
||||||
|
- LINT=true
|
||||||
|
- COVERAGE=true
|
||||||
|
|
||||||
|
install:
|
||||||
|
- if [ "$LINT" == true ]; then go get -u golang.org/x/lint/golint/... ; else echo 'skipping lint'; fi
|
||||||
|
- go get -u github.com/stretchr/testify/...
|
||||||
|
|
||||||
|
script:
|
||||||
|
- make test
|
||||||
|
- go build ./...
|
||||||
|
- if [ "$LINT" == true ]; then make lint ; else echo 'skipping lint'; fi
|
||||||
|
- if [ "$COVERAGE" == true ]; then make cover && bash <(curl -s https://codecov.io/bash) ; else echo 'skipping coverage'; fi
|
46
src/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md
generated
vendored
Normal file
@ -0,0 +1,46 @@
|
|||||||
|
Changes by Version
|
||||||
|
==================
|
||||||
|
|
||||||
|
1.1.0 (2019-03-23)
|
||||||
|
-------------------
|
||||||
|
|
||||||
|
Notable changes:
|
||||||
|
- The library is now released under Apache 2.0 license
|
||||||
|
- Using Set() instead of Add() in HTTPHeadersCarrier is functionally a breaking change (fixes issue [#159](https://github.com/opentracing/opentracing-go/issues/159))
|
||||||
|
- 'golang.org/x/net/context' is replaced with 'context' from the standard library
|
||||||
|
|
||||||
|
List of all changes:
|
||||||
|
|
||||||
|
- Export StartSpanFromContextWithTracer (#214) <Aaron Delaney>
|
||||||
|
- Add IsGlobalTracerRegistered() to indicate if a tracer has been registered (#201) <Mike Goldsmith>
|
||||||
|
- Use Set() instead of Add() in HTTPHeadersCarrier (#191) <jeremyxu2010>
|
||||||
|
- Update license to Apache 2.0 (#181) <Andrea Kao>
|
||||||
|
- Replace 'golang.org/x/net/context' with 'context' (#176) <Tony Ghita>
|
||||||
|
- Port of Python opentracing/harness/api_check.py to Go (#146) <chris erway>
|
||||||
|
- Fix race condition in MockSpan.Context() (#170) <Brad>
|
||||||
|
- Add PeerHostIPv4.SetString() (#155) <NeoCN>
|
||||||
|
- Add a Noop log field type to log to allow for optional fields (#150) <Matt Ho>
|
||||||
|
|
||||||
|
|
||||||
|
1.0.2 (2017-04-26)
|
||||||
|
-------------------
|
||||||
|
|
||||||
|
- Add more semantic tags (#139) <Rustam Zagirov>
|
||||||
|
|
||||||
|
|
||||||
|
1.0.1 (2017-02-06)
|
||||||
|
-------------------
|
||||||
|
|
||||||
|
- Correct spelling in comments <Ben Sigelman>
|
||||||
|
- Address race in nextMockID() (#123) <bill fumerola>
|
||||||
|
- log: avoid panic marshaling nil error (#131) <Anthony Voutas>
|
||||||
|
- Deprecate InitGlobalTracer in favor of SetGlobalTracer (#128) <Yuri Shkuro>
|
||||||
|
- Drop Go 1.5 that fails in Travis (#129) <Yuri Shkuro>
|
||||||
|
- Add convenience methods Key() and Value() to log.Field <Ben Sigelman>
|
||||||
|
- Add convenience methods to log.Field (2 years, 6 months ago) <Radu Berinde>
|
||||||
|
|
||||||
|
1.0.0 (2016-09-26)
|
||||||
|
-------------------
|
||||||
|
|
||||||
|
- This release implements OpenTracing Specification 1.0 (https://opentracing.io/spec)
|
||||||
|
|
201
src/vendor/github.com/opentracing/opentracing-go/LICENSE
generated
vendored
Normal file
@ -0,0 +1,201 @@
|
|||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright 2016 The OpenTracing Authors
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
20
src/vendor/github.com/opentracing/opentracing-go/Makefile
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
.DEFAULT_GOAL := test-and-lint
|
||||||
|
|
||||||
|
.PHONY: test-and-lint
|
||||||
|
test-and-lint: test lint
|
||||||
|
|
||||||
|
.PHONY: test
|
||||||
|
test:
|
||||||
|
go test -v -cover -race ./...
|
||||||
|
|
||||||
|
.PHONY: cover
|
||||||
|
cover:
|
||||||
|
go test -v -coverprofile=coverage.txt -covermode=atomic -race ./...
|
||||||
|
|
||||||
|
.PHONY: lint
|
||||||
|
lint:
|
||||||
|
go fmt ./...
|
||||||
|
golint ./...
|
||||||
|
@# Run again with magic to exit non-zero if golint outputs anything.
|
||||||
|
@! (golint ./... | read dummy)
|
||||||
|
go vet ./...
|
171
src/vendor/github.com/opentracing/opentracing-go/README.md
generated
vendored
Normal file
@ -0,0 +1,171 @@
|
|||||||
|
[![Gitter chat](http://img.shields.io/badge/gitter-join%20chat%20%E2%86%92-brightgreen.svg)](https://gitter.im/opentracing/public) [![Build Status](https://travis-ci.org/opentracing/opentracing-go.svg?branch=master)](https://travis-ci.org/opentracing/opentracing-go) [![GoDoc](https://godoc.org/github.com/opentracing/opentracing-go?status.svg)](http://godoc.org/github.com/opentracing/opentracing-go)
|
||||||
|
[![Sourcegraph Badge](https://sourcegraph.com/github.com/opentracing/opentracing-go/-/badge.svg)](https://sourcegraph.com/github.com/opentracing/opentracing-go?badge)
|
||||||
|
|
||||||
|
# OpenTracing API for Go
|
||||||
|
|
||||||
|
This package is a Go platform API for OpenTracing.
|
||||||
|
|
||||||
|
## Required Reading
|
||||||
|
|
||||||
|
In order to understand the Go platform API, one must first be familiar with the
|
||||||
|
[OpenTracing project](https://opentracing.io) and
|
||||||
|
[terminology](https://opentracing.io/specification/) more specifically.
|
||||||
|
|
||||||
|
## API overview for those adding instrumentation
|
||||||
|
|
||||||
|
Everyday consumers of this `opentracing` package really only need to worry
|
||||||
|
about a couple of key abstractions: the `StartSpan` function, the `Span`
|
||||||
|
interface, and binding a `Tracer` at `main()`-time. Here are code snippets
|
||||||
|
demonstrating some important use cases.
|
||||||
|
|
||||||
|
#### Singleton initialization
|
||||||
|
|
||||||
|
The simplest starting point is `./default_tracer.go`. As early as possible, call
|
||||||
|
|
||||||
|
```go
|
||||||
|
import "github.com/opentracing/opentracing-go"
|
||||||
|
import ".../some_tracing_impl"
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
opentracing.SetGlobalTracer(
|
||||||
|
// tracing impl specific:
|
||||||
|
some_tracing_impl.New(...),
|
||||||
|
)
|
||||||
|
...
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Non-Singleton initialization
|
||||||
|
|
||||||
|
If you prefer direct control to singletons, manage ownership of the
|
||||||
|
`opentracing.Tracer` implementation explicitly.
|
||||||
|
|
||||||
|
#### Creating a Span given an existing Go `context.Context`
|
||||||
|
|
||||||
|
If you use `context.Context` in your application, OpenTracing's Go library will
|
||||||
|
happily rely on it for `Span` propagation. To start a new (blocking child)
|
||||||
|
`Span`, you can use `StartSpanFromContext`.
|
||||||
|
|
||||||
|
```go
|
||||||
|
func xyz(ctx context.Context, ...) {
|
||||||
|
...
|
||||||
|
span, ctx := opentracing.StartSpanFromContext(ctx, "operation_name")
|
||||||
|
defer span.Finish()
|
||||||
|
span.LogFields(
|
||||||
|
log.String("event", "soft error"),
|
||||||
|
log.String("type", "cache timeout"),
|
||||||
|
log.Int("waited.millis", 1500))
|
||||||
|
...
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Starting an empty trace by creating a "root span"
|
||||||
|
|
||||||
|
It's always possible to create a "root" `Span` with no parent or other causal
|
||||||
|
reference.
|
||||||
|
|
||||||
|
```go
|
||||||
|
func xyz() {
|
||||||
|
...
|
||||||
|
sp := opentracing.StartSpan("operation_name")
|
||||||
|
defer sp.Finish()
|
||||||
|
...
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Creating a (child) Span given an existing (parent) Span
|
||||||
|
|
||||||
|
```go
|
||||||
|
func xyz(parentSpan opentracing.Span, ...) {
|
||||||
|
...
|
||||||
|
sp := opentracing.StartSpan(
|
||||||
|
"operation_name",
|
||||||
|
opentracing.ChildOf(parentSpan.Context()))
|
||||||
|
defer sp.Finish()
|
||||||
|
...
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Serializing to the wire
|
||||||
|
|
||||||
|
```go
|
||||||
|
func makeSomeRequest(ctx context.Context) ... {
|
||||||
|
if span := opentracing.SpanFromContext(ctx); span != nil {
|
||||||
|
httpClient := &http.Client{}
|
||||||
|
httpReq, _ := http.NewRequest("GET", "http://myservice/", nil)
|
||||||
|
|
||||||
|
// Transmit the span's TraceContext as HTTP headers on our
|
||||||
|
// outbound request.
|
||||||
|
opentracing.GlobalTracer().Inject(
|
||||||
|
span.Context(),
|
||||||
|
opentracing.HTTPHeaders,
|
||||||
|
opentracing.HTTPHeadersCarrier(httpReq.Header))
|
||||||
|
|
||||||
|
resp, err := httpClient.Do(httpReq)
|
||||||
|
...
|
||||||
|
}
|
||||||
|
...
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Deserializing from the wire
|
||||||
|
|
||||||
|
```go
|
||||||
|
http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
|
||||||
|
var serverSpan opentracing.Span
|
||||||
|
appSpecificOperationName := ...
|
||||||
|
wireContext, err := opentracing.GlobalTracer().Extract(
|
||||||
|
opentracing.HTTPHeaders,
|
||||||
|
opentracing.HTTPHeadersCarrier(req.Header))
|
||||||
|
if err != nil {
|
||||||
|
// Optionally record something about err here
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create the span referring to the RPC client if available.
|
||||||
|
// If wireContext == nil, a root span will be created.
|
||||||
|
serverSpan = opentracing.StartSpan(
|
||||||
|
appSpecificOperationName,
|
||||||
|
ext.RPCServerOption(wireContext))
|
||||||
|
|
||||||
|
defer serverSpan.Finish()
|
||||||
|
|
||||||
|
ctx := opentracing.ContextWithSpan(context.Background(), serverSpan)
|
||||||
|
...
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Conditionally capture a field using `log.Noop`
|
||||||
|
|
||||||
|
In some situations, you may want to dynamically decide whether or not
|
||||||
|
to log a field. For example, you may want to capture additional data,
|
||||||
|
such as a customer ID, in non-production environments:
|
||||||
|
|
||||||
|
```go
|
||||||
|
func Customer(order *Order) log.Field {
|
||||||
|
if os.Getenv("ENVIRONMENT") == "dev" {
|
||||||
|
return log.String("customer", order.Customer.ID)
|
||||||
|
}
|
||||||
|
return log.Noop()
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Goroutine-safety
|
||||||
|
|
||||||
|
The entire public API is goroutine-safe and does not require external
|
||||||
|
synchronization.
|
||||||
|
|
||||||
|
## API pointers for those implementing a tracing system
|
||||||
|
|
||||||
|
Tracing system implementors may be able to reuse or copy-paste-modify the `basictracer` package, found [here](https://github.com/opentracing/basictracer-go). In particular, see `basictracer.New(...)`.
|
||||||
|
|
||||||
|
## API compatibility
|
||||||
|
|
||||||
|
For the time being, "mild" backwards-incompatible changes may be made without changing the major version number. As OpenTracing and `opentracing-go` mature, backwards compatibility will become more of a priority.
|
||||||
|
|
||||||
|
## Tracer test suite
|
||||||
|
|
||||||
|
A test suite is available in the [harness](https://godoc.org/github.com/opentracing/opentracing-go/harness) package that can assist Tracer implementors to assert that their Tracer is working correctly.
|
||||||
|
|
||||||
|
## Licensing
|
||||||
|
|
||||||
|
[Apache 2.0 License](./LICENSE).
|
42
src/vendor/github.com/opentracing/opentracing-go/globaltracer.go
generated
vendored
Normal file
42
src/vendor/github.com/opentracing/opentracing-go/globaltracer.go
generated
vendored
Normal file
@ -0,0 +1,42 @@
|
|||||||
|
package opentracing
|
||||||
|
|
||||||
|
type registeredTracer struct {
|
||||||
|
tracer Tracer
|
||||||
|
isRegistered bool
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
globalTracer = registeredTracer{NoopTracer{}, false}
|
||||||
|
)
|
||||||
|
|
||||||
|
// SetGlobalTracer sets the [singleton] opentracing.Tracer returned by
|
||||||
|
// GlobalTracer(). Those who use GlobalTracer (rather than directly manage an
|
||||||
|
// opentracing.Tracer instance) should call SetGlobalTracer as early as
|
||||||
|
// possible in main(), prior to calling the `StartSpan` global func below.
|
||||||
|
// Prior to calling `SetGlobalTracer`, any Spans started via the `StartSpan`
|
||||||
|
// (etc) globals are noops.
|
||||||
|
func SetGlobalTracer(tracer Tracer) {
|
||||||
|
globalTracer = registeredTracer{tracer, true}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GlobalTracer returns the global singleton `Tracer` implementation.
|
||||||
|
// Before `SetGlobalTracer()` is called, the `GlobalTracer()` is a noop
|
||||||
|
// implementation that drops all data handed to it.
|
||||||
|
func GlobalTracer() Tracer {
|
||||||
|
return globalTracer.tracer
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartSpan defers to `Tracer.StartSpan`. See `GlobalTracer()`.
|
||||||
|
func StartSpan(operationName string, opts ...StartSpanOption) Span {
|
||||||
|
return globalTracer.tracer.StartSpan(operationName, opts...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// InitGlobalTracer is deprecated. Please use SetGlobalTracer.
|
||||||
|
func InitGlobalTracer(tracer Tracer) {
|
||||||
|
SetGlobalTracer(tracer)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsGlobalTracerRegistered returns a `bool` to indicate if a tracer has been globally registered
|
||||||
|
func IsGlobalTracerRegistered() bool {
|
||||||
|
return globalTracer.isRegistered
|
||||||
|
}
|
60
src/vendor/github.com/opentracing/opentracing-go/gocontext.go
generated
vendored
Normal file
60
src/vendor/github.com/opentracing/opentracing-go/gocontext.go
generated
vendored
Normal file
@ -0,0 +1,60 @@
|
|||||||
|
package opentracing
|
||||||
|
|
||||||
|
import "context"
|
||||||
|
|
||||||
|
type contextKey struct{}
|
||||||
|
|
||||||
|
var activeSpanKey = contextKey{}
|
||||||
|
|
||||||
|
// ContextWithSpan returns a new `context.Context` that holds a reference to
|
||||||
|
// `span`'s SpanContext.
|
||||||
|
func ContextWithSpan(ctx context.Context, span Span) context.Context {
|
||||||
|
return context.WithValue(ctx, activeSpanKey, span)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SpanFromContext returns the `Span` previously associated with `ctx`, or
|
||||||
|
// `nil` if no such `Span` could be found.
|
||||||
|
//
|
||||||
|
// NOTE: context.Context != SpanContext: the former is Go's intra-process
|
||||||
|
// context propagation mechanism, and the latter houses OpenTracing's per-Span
|
||||||
|
// identity and baggage information.
|
||||||
|
func SpanFromContext(ctx context.Context) Span {
|
||||||
|
val := ctx.Value(activeSpanKey)
|
||||||
|
if sp, ok := val.(Span); ok {
|
||||||
|
return sp
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartSpanFromContext starts and returns a Span with `operationName`, using
|
||||||
|
// any Span found within `ctx` as a ChildOfRef. If no such parent could be
|
||||||
|
// found, StartSpanFromContext creates a root (parentless) Span.
|
||||||
|
//
|
||||||
|
// The second return value is a context.Context object built around the
|
||||||
|
// returned Span.
|
||||||
|
//
|
||||||
|
// Example usage:
|
||||||
|
//
|
||||||
|
// SomeFunction(ctx context.Context, ...) {
|
||||||
|
// sp, ctx := opentracing.StartSpanFromContext(ctx, "SomeFunction")
|
||||||
|
// defer sp.Finish()
|
||||||
|
// ...
|
||||||
|
// }
|
||||||
|
func StartSpanFromContext(ctx context.Context, operationName string, opts ...StartSpanOption) (Span, context.Context) {
|
||||||
|
return StartSpanFromContextWithTracer(ctx, GlobalTracer(), operationName, opts...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartSpanFromContextWithTracer starts and returns a span with `operationName`
|
||||||
|
// using a span found within the context as a ChildOfRef. If that doesn't exist
|
||||||
|
// it creates a root span. It also returns a context.Context object built
|
||||||
|
// around the returned span.
|
||||||
|
//
|
||||||
|
// It's behavior is identical to StartSpanFromContext except that it takes an explicit
|
||||||
|
// tracer as opposed to using the global tracer.
|
||||||
|
func StartSpanFromContextWithTracer(ctx context.Context, tracer Tracer, operationName string, opts ...StartSpanOption) (Span, context.Context) {
|
||||||
|
if parentSpan := SpanFromContext(ctx); parentSpan != nil {
|
||||||
|
opts = append(opts, ChildOf(parentSpan.Context()))
|
||||||
|
}
|
||||||
|
span := tracer.StartSpan(operationName, opts...)
|
||||||
|
return span, ContextWithSpan(ctx, span)
|
||||||
|
}
|
269
src/vendor/github.com/opentracing/opentracing-go/log/field.go
generated
vendored
Normal file
269
src/vendor/github.com/opentracing/opentracing-go/log/field.go
generated
vendored
Normal file
@ -0,0 +1,269 @@
|
|||||||
|
package log
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
)
|
||||||
|
|
||||||
|
type fieldType int
|
||||||
|
|
||||||
|
const (
|
||||||
|
stringType fieldType = iota
|
||||||
|
boolType
|
||||||
|
intType
|
||||||
|
int32Type
|
||||||
|
uint32Type
|
||||||
|
int64Type
|
||||||
|
uint64Type
|
||||||
|
float32Type
|
||||||
|
float64Type
|
||||||
|
errorType
|
||||||
|
objectType
|
||||||
|
lazyLoggerType
|
||||||
|
noopType
|
||||||
|
)
|
||||||
|
|
||||||
|
// Field instances are constructed via LogBool, LogString, and so on.
|
||||||
|
// Tracing implementations may then handle them via the Field.Marshal
|
||||||
|
// method.
|
||||||
|
//
|
||||||
|
// "heavily influenced by" (i.e., partially stolen from)
|
||||||
|
// https://github.com/uber-go/zap
|
||||||
|
type Field struct {
|
||||||
|
key string
|
||||||
|
fieldType fieldType
|
||||||
|
numericVal int64
|
||||||
|
stringVal string
|
||||||
|
interfaceVal interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// String adds a string-valued key:value pair to a Span.LogFields() record
|
||||||
|
func String(key, val string) Field {
|
||||||
|
return Field{
|
||||||
|
key: key,
|
||||||
|
fieldType: stringType,
|
||||||
|
stringVal: val,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bool adds a bool-valued key:value pair to a Span.LogFields() record
|
||||||
|
func Bool(key string, val bool) Field {
|
||||||
|
var numericVal int64
|
||||||
|
if val {
|
||||||
|
numericVal = 1
|
||||||
|
}
|
||||||
|
return Field{
|
||||||
|
key: key,
|
||||||
|
fieldType: boolType,
|
||||||
|
numericVal: numericVal,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Int adds an int-valued key:value pair to a Span.LogFields() record
|
||||||
|
func Int(key string, val int) Field {
|
||||||
|
return Field{
|
||||||
|
key: key,
|
||||||
|
fieldType: intType,
|
||||||
|
numericVal: int64(val),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Int32 adds an int32-valued key:value pair to a Span.LogFields() record
|
||||||
|
func Int32(key string, val int32) Field {
|
||||||
|
return Field{
|
||||||
|
key: key,
|
||||||
|
fieldType: int32Type,
|
||||||
|
numericVal: int64(val),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Int64 adds an int64-valued key:value pair to a Span.LogFields() record
|
||||||
|
func Int64(key string, val int64) Field {
|
||||||
|
return Field{
|
||||||
|
key: key,
|
||||||
|
fieldType: int64Type,
|
||||||
|
numericVal: val,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Uint32 adds a uint32-valued key:value pair to a Span.LogFields() record
|
||||||
|
func Uint32(key string, val uint32) Field {
|
||||||
|
return Field{
|
||||||
|
key: key,
|
||||||
|
fieldType: uint32Type,
|
||||||
|
numericVal: int64(val),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Uint64 adds a uint64-valued key:value pair to a Span.LogFields() record
|
||||||
|
func Uint64(key string, val uint64) Field {
|
||||||
|
return Field{
|
||||||
|
key: key,
|
||||||
|
fieldType: uint64Type,
|
||||||
|
numericVal: int64(val),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Float32 adds a float32-valued key:value pair to a Span.LogFields() record
|
||||||
|
func Float32(key string, val float32) Field {
|
||||||
|
return Field{
|
||||||
|
key: key,
|
||||||
|
fieldType: float32Type,
|
||||||
|
numericVal: int64(math.Float32bits(val)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Float64 adds a float64-valued key:value pair to a Span.LogFields() record
|
||||||
|
func Float64(key string, val float64) Field {
|
||||||
|
return Field{
|
||||||
|
key: key,
|
||||||
|
fieldType: float64Type,
|
||||||
|
numericVal: int64(math.Float64bits(val)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error adds an error with the key "error" to a Span.LogFields() record
|
||||||
|
func Error(err error) Field {
|
||||||
|
return Field{
|
||||||
|
key: "error",
|
||||||
|
fieldType: errorType,
|
||||||
|
interfaceVal: err,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Object adds an object-valued key:value pair to a Span.LogFields() record
|
||||||
|
func Object(key string, obj interface{}) Field {
|
||||||
|
return Field{
|
||||||
|
key: key,
|
||||||
|
fieldType: objectType,
|
||||||
|
interfaceVal: obj,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// LazyLogger allows for user-defined, late-bound logging of arbitrary data
|
||||||
|
type LazyLogger func(fv Encoder)
|
||||||
|
|
||||||
|
// Lazy adds a LazyLogger to a Span.LogFields() record; the tracing
|
||||||
|
// implementation will call the LazyLogger function at an indefinite time in
|
||||||
|
// the future (after Lazy() returns).
|
||||||
|
func Lazy(ll LazyLogger) Field {
|
||||||
|
return Field{
|
||||||
|
fieldType: lazyLoggerType,
|
||||||
|
interfaceVal: ll,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Noop creates a no-op log field that should be ignored by the tracer.
|
||||||
|
// It can be used to capture optional fields, for example those that should
|
||||||
|
// only be logged in non-production environment:
|
||||||
|
//
|
||||||
|
// func customerField(order *Order) log.Field {
|
||||||
|
// if os.Getenv("ENVIRONMENT") == "dev" {
|
||||||
|
// return log.String("customer", order.Customer.ID)
|
||||||
|
// }
|
||||||
|
// return log.Noop()
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// span.LogFields(log.String("event", "purchase"), customerField(order))
|
||||||
|
//
|
||||||
|
func Noop() Field {
|
||||||
|
return Field{
|
||||||
|
fieldType: noopType,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Encoder allows access to the contents of a Field (via a call to
|
||||||
|
// Field.Marshal).
|
||||||
|
//
|
||||||
|
// Tracer implementations typically provide an implementation of Encoder;
|
||||||
|
// OpenTracing callers typically do not need to concern themselves with it.
|
||||||
|
type Encoder interface {
|
||||||
|
EmitString(key, value string)
|
||||||
|
EmitBool(key string, value bool)
|
||||||
|
EmitInt(key string, value int)
|
||||||
|
EmitInt32(key string, value int32)
|
||||||
|
EmitInt64(key string, value int64)
|
||||||
|
EmitUint32(key string, value uint32)
|
||||||
|
EmitUint64(key string, value uint64)
|
||||||
|
EmitFloat32(key string, value float32)
|
||||||
|
EmitFloat64(key string, value float64)
|
||||||
|
EmitObject(key string, value interface{})
|
||||||
|
EmitLazyLogger(value LazyLogger)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Marshal passes a Field instance through to the appropriate
|
||||||
|
// field-type-specific method of an Encoder.
|
||||||
|
func (lf Field) Marshal(visitor Encoder) {
|
||||||
|
switch lf.fieldType {
|
||||||
|
case stringType:
|
||||||
|
visitor.EmitString(lf.key, lf.stringVal)
|
||||||
|
case boolType:
|
||||||
|
visitor.EmitBool(lf.key, lf.numericVal != 0)
|
||||||
|
case intType:
|
||||||
|
visitor.EmitInt(lf.key, int(lf.numericVal))
|
||||||
|
case int32Type:
|
||||||
|
visitor.EmitInt32(lf.key, int32(lf.numericVal))
|
||||||
|
case int64Type:
|
||||||
|
visitor.EmitInt64(lf.key, int64(lf.numericVal))
|
||||||
|
case uint32Type:
|
||||||
|
visitor.EmitUint32(lf.key, uint32(lf.numericVal))
|
||||||
|
case uint64Type:
|
||||||
|
visitor.EmitUint64(lf.key, uint64(lf.numericVal))
|
||||||
|
case float32Type:
|
||||||
|
visitor.EmitFloat32(lf.key, math.Float32frombits(uint32(lf.numericVal)))
|
||||||
|
case float64Type:
|
||||||
|
visitor.EmitFloat64(lf.key, math.Float64frombits(uint64(lf.numericVal)))
|
||||||
|
case errorType:
|
||||||
|
if err, ok := lf.interfaceVal.(error); ok {
|
||||||
|
visitor.EmitString(lf.key, err.Error())
|
||||||
|
} else {
|
||||||
|
visitor.EmitString(lf.key, "<nil>")
|
||||||
|
}
|
||||||
|
case objectType:
|
||||||
|
visitor.EmitObject(lf.key, lf.interfaceVal)
|
||||||
|
case lazyLoggerType:
|
||||||
|
visitor.EmitLazyLogger(lf.interfaceVal.(LazyLogger))
|
||||||
|
case noopType:
|
||||||
|
// intentionally left blank
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Key returns the field's key.
|
||||||
|
func (lf Field) Key() string {
|
||||||
|
return lf.key
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value returns the field's value as interface{}.
|
||||||
|
func (lf Field) Value() interface{} {
|
||||||
|
switch lf.fieldType {
|
||||||
|
case stringType:
|
||||||
|
return lf.stringVal
|
||||||
|
case boolType:
|
||||||
|
return lf.numericVal != 0
|
||||||
|
case intType:
|
||||||
|
return int(lf.numericVal)
|
||||||
|
case int32Type:
|
||||||
|
return int32(lf.numericVal)
|
||||||
|
case int64Type:
|
||||||
|
return int64(lf.numericVal)
|
||||||
|
case uint32Type:
|
||||||
|
return uint32(lf.numericVal)
|
||||||
|
case uint64Type:
|
||||||
|
return uint64(lf.numericVal)
|
||||||
|
case float32Type:
|
||||||
|
return math.Float32frombits(uint32(lf.numericVal))
|
||||||
|
case float64Type:
|
||||||
|
return math.Float64frombits(uint64(lf.numericVal))
|
||||||
|
case errorType, objectType, lazyLoggerType:
|
||||||
|
return lf.interfaceVal
|
||||||
|
case noopType:
|
||||||
|
return nil
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns a string representation of the key and value.
|
||||||
|
func (lf Field) String() string {
|
||||||
|
return fmt.Sprint(lf.key, ":", lf.Value())
|
||||||
|
}
|
54
src/vendor/github.com/opentracing/opentracing-go/log/util.go
generated
vendored
Normal file
54
src/vendor/github.com/opentracing/opentracing-go/log/util.go
generated
vendored
Normal file
@ -0,0 +1,54 @@
|
|||||||
|
package log
|
||||||
|
|
||||||
|
import "fmt"
|
||||||
|
|
||||||
|
// InterleavedKVToFields converts keyValues a la Span.LogKV() to a Field slice
|
||||||
|
// a la Span.LogFields().
|
||||||
|
func InterleavedKVToFields(keyValues ...interface{}) ([]Field, error) {
|
||||||
|
if len(keyValues)%2 != 0 {
|
||||||
|
return nil, fmt.Errorf("non-even keyValues len: %d", len(keyValues))
|
||||||
|
}
|
||||||
|
fields := make([]Field, len(keyValues)/2)
|
||||||
|
for i := 0; i*2 < len(keyValues); i++ {
|
||||||
|
key, ok := keyValues[i*2].(string)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf(
|
||||||
|
"non-string key (pair #%d): %T",
|
||||||
|
i, keyValues[i*2])
|
||||||
|
}
|
||||||
|
switch typedVal := keyValues[i*2+1].(type) {
|
||||||
|
case bool:
|
||||||
|
fields[i] = Bool(key, typedVal)
|
||||||
|
case string:
|
||||||
|
fields[i] = String(key, typedVal)
|
||||||
|
case int:
|
||||||
|
fields[i] = Int(key, typedVal)
|
||||||
|
case int8:
|
||||||
|
fields[i] = Int32(key, int32(typedVal))
|
||||||
|
case int16:
|
||||||
|
fields[i] = Int32(key, int32(typedVal))
|
||||||
|
case int32:
|
||||||
|
fields[i] = Int32(key, typedVal)
|
||||||
|
case int64:
|
||||||
|
fields[i] = Int64(key, typedVal)
|
||||||
|
case uint:
|
||||||
|
fields[i] = Uint64(key, uint64(typedVal))
|
||||||
|
case uint64:
|
||||||
|
fields[i] = Uint64(key, typedVal)
|
||||||
|
case uint8:
|
||||||
|
fields[i] = Uint32(key, uint32(typedVal))
|
||||||
|
case uint16:
|
||||||
|
fields[i] = Uint32(key, uint32(typedVal))
|
||||||
|
case uint32:
|
||||||
|
fields[i] = Uint32(key, typedVal)
|
||||||
|
case float32:
|
||||||
|
fields[i] = Float32(key, typedVal)
|
||||||
|
case float64:
|
||||||
|
fields[i] = Float64(key, typedVal)
|
||||||
|
default:
|
||||||
|
// When in doubt, coerce to a string
|
||||||
|
fields[i] = String(key, fmt.Sprint(typedVal))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return fields, nil
|
||||||
|
}
|
64
src/vendor/github.com/opentracing/opentracing-go/noop.go
generated
vendored
Normal file
64
src/vendor/github.com/opentracing/opentracing-go/noop.go
generated
vendored
Normal file
@ -0,0 +1,64 @@
|
|||||||
|
package opentracing
|
||||||
|
|
||||||
|
import "github.com/opentracing/opentracing-go/log"
|
||||||
|
|
||||||
|
// A NoopTracer is a trivial, minimum overhead implementation of Tracer
|
||||||
|
// for which all operations are no-ops.
|
||||||
|
//
|
||||||
|
// The primary use of this implementation is in libraries, such as RPC
|
||||||
|
// frameworks, that make tracing an optional feature controlled by the
|
||||||
|
// end user. A no-op implementation allows said libraries to use it
|
||||||
|
// as the default Tracer and to write instrumentation that does
|
||||||
|
// not need to keep checking if the tracer instance is nil.
|
||||||
|
//
|
||||||
|
// For the same reason, the NoopTracer is the default "global" tracer
|
||||||
|
// (see GlobalTracer and SetGlobalTracer functions).
|
||||||
|
//
|
||||||
|
// WARNING: NoopTracer does not support baggage propagation.
|
||||||
|
type NoopTracer struct{}
|
||||||
|
|
||||||
|
type noopSpan struct{}
|
||||||
|
type noopSpanContext struct{}
|
||||||
|
|
||||||
|
var (
|
||||||
|
defaultNoopSpanContext = noopSpanContext{}
|
||||||
|
defaultNoopSpan = noopSpan{}
|
||||||
|
defaultNoopTracer = NoopTracer{}
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
emptyString = ""
|
||||||
|
)
|
||||||
|
|
||||||
|
// noopSpanContext:
|
||||||
|
func (n noopSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {}
|
||||||
|
|
||||||
|
// noopSpan:
|
||||||
|
func (n noopSpan) Context() SpanContext { return defaultNoopSpanContext }
|
||||||
|
func (n noopSpan) SetBaggageItem(key, val string) Span { return defaultNoopSpan }
|
||||||
|
func (n noopSpan) BaggageItem(key string) string { return emptyString }
|
||||||
|
func (n noopSpan) SetTag(key string, value interface{}) Span { return n }
|
||||||
|
func (n noopSpan) LogFields(fields ...log.Field) {}
|
||||||
|
func (n noopSpan) LogKV(keyVals ...interface{}) {}
|
||||||
|
func (n noopSpan) Finish() {}
|
||||||
|
func (n noopSpan) FinishWithOptions(opts FinishOptions) {}
|
||||||
|
func (n noopSpan) SetOperationName(operationName string) Span { return n }
|
||||||
|
func (n noopSpan) Tracer() Tracer { return defaultNoopTracer }
|
||||||
|
func (n noopSpan) LogEvent(event string) {}
|
||||||
|
func (n noopSpan) LogEventWithPayload(event string, payload interface{}) {}
|
||||||
|
func (n noopSpan) Log(data LogData) {}
|
||||||
|
|
||||||
|
// StartSpan belongs to the Tracer interface.
|
||||||
|
func (n NoopTracer) StartSpan(operationName string, opts ...StartSpanOption) Span {
|
||||||
|
return defaultNoopSpan
|
||||||
|
}
|
||||||
|
|
||||||
|
// Inject belongs to the Tracer interface.
|
||||||
|
func (n NoopTracer) Inject(sp SpanContext, format interface{}, carrier interface{}) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract belongs to the Tracer interface.
|
||||||
|
func (n NoopTracer) Extract(format interface{}, carrier interface{}) (SpanContext, error) {
|
||||||
|
return nil, ErrSpanContextNotFound
|
||||||
|
}
|
176
src/vendor/github.com/opentracing/opentracing-go/propagation.go
generated
vendored
Normal file
176
src/vendor/github.com/opentracing/opentracing-go/propagation.go
generated
vendored
Normal file
@ -0,0 +1,176 @@
|
|||||||
|
package opentracing
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
///////////////////////////////////////////////////////////////////////////////
|
||||||
|
// CORE PROPAGATION INTERFACES:
|
||||||
|
///////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrUnsupportedFormat occurs when the `format` passed to Tracer.Inject() or
|
||||||
|
// Tracer.Extract() is not recognized by the Tracer implementation.
|
||||||
|
ErrUnsupportedFormat = errors.New("opentracing: Unknown or unsupported Inject/Extract format")
|
||||||
|
|
||||||
|
// ErrSpanContextNotFound occurs when the `carrier` passed to
|
||||||
|
// Tracer.Extract() is valid and uncorrupted but has insufficient
|
||||||
|
// information to extract a SpanContext.
|
||||||
|
ErrSpanContextNotFound = errors.New("opentracing: SpanContext not found in Extract carrier")
|
||||||
|
|
||||||
|
// ErrInvalidSpanContext errors occur when Tracer.Inject() is asked to
|
||||||
|
// operate on a SpanContext which it is not prepared to handle (for
|
||||||
|
// example, since it was created by a different tracer implementation).
|
||||||
|
ErrInvalidSpanContext = errors.New("opentracing: SpanContext type incompatible with tracer")
|
||||||
|
|
||||||
|
// ErrInvalidCarrier errors occur when Tracer.Inject() or Tracer.Extract()
|
||||||
|
// implementations expect a different type of `carrier` than they are
|
||||||
|
// given.
|
||||||
|
ErrInvalidCarrier = errors.New("opentracing: Invalid Inject/Extract carrier")
|
||||||
|
|
||||||
|
// ErrSpanContextCorrupted occurs when the `carrier` passed to
|
||||||
|
// Tracer.Extract() is of the expected type but is corrupted.
|
||||||
|
ErrSpanContextCorrupted = errors.New("opentracing: SpanContext data corrupted in Extract carrier")
|
||||||
|
)
|
||||||
|
|
||||||
|
///////////////////////////////////////////////////////////////////////////////
|
||||||
|
// BUILTIN PROPAGATION FORMATS:
|
||||||
|
///////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
// BuiltinFormat is used to demarcate the values within package `opentracing`
|
||||||
|
// that are intended for use with the Tracer.Inject() and Tracer.Extract()
|
||||||
|
// methods.
|
||||||
|
type BuiltinFormat byte
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Binary represents SpanContexts as opaque binary data.
|
||||||
|
//
|
||||||
|
// For Tracer.Inject(): the carrier must be an `io.Writer`.
|
||||||
|
//
|
||||||
|
// For Tracer.Extract(): the carrier must be an `io.Reader`.
|
||||||
|
Binary BuiltinFormat = iota
|
||||||
|
|
||||||
|
// TextMap represents SpanContexts as key:value string pairs.
|
||||||
|
//
|
||||||
|
// Unlike HTTPHeaders, the TextMap format does not restrict the key or
|
||||||
|
// value character sets in any way.
|
||||||
|
//
|
||||||
|
// For Tracer.Inject(): the carrier must be a `TextMapWriter`.
|
||||||
|
//
|
||||||
|
// For Tracer.Extract(): the carrier must be a `TextMapReader`.
|
||||||
|
TextMap
|
||||||
|
|
||||||
|
// HTTPHeaders represents SpanContexts as HTTP header string pairs.
|
||||||
|
//
|
||||||
|
// Unlike TextMap, the HTTPHeaders format requires that the keys and values
|
||||||
|
// be valid as HTTP headers as-is (i.e., character casing may be unstable
|
||||||
|
// and special characters are disallowed in keys, values should be
|
||||||
|
// URL-escaped, etc).
|
||||||
|
//
|
||||||
|
// For Tracer.Inject(): the carrier must be a `TextMapWriter`.
|
||||||
|
//
|
||||||
|
// For Tracer.Extract(): the carrier must be a `TextMapReader`.
|
||||||
|
//
|
||||||
|
// See HTTPHeadersCarrier for an implementation of both TextMapWriter
|
||||||
|
// and TextMapReader that defers to an http.Header instance for storage.
|
||||||
|
// For example, Inject():
|
||||||
|
//
|
||||||
|
// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
|
||||||
|
// err := span.Tracer().Inject(
|
||||||
|
// span.Context(), opentracing.HTTPHeaders, carrier)
|
||||||
|
//
|
||||||
|
// Or Extract():
|
||||||
|
//
|
||||||
|
// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
|
||||||
|
// clientContext, err := tracer.Extract(
|
||||||
|
// opentracing.HTTPHeaders, carrier)
|
||||||
|
//
|
||||||
|
HTTPHeaders
|
||||||
|
)
|
||||||
|
|
||||||
|
// TextMapWriter is the Inject() carrier for the TextMap builtin format. With
|
||||||
|
// it, the caller can encode a SpanContext for propagation as entries in a map
|
||||||
|
// of unicode strings.
|
||||||
|
type TextMapWriter interface {
|
||||||
|
// Set a key:value pair to the carrier. Multiple calls to Set() for the
|
||||||
|
// same key leads to undefined behavior.
|
||||||
|
//
|
||||||
|
// NOTE: The backing store for the TextMapWriter may contain data unrelated
|
||||||
|
// to SpanContext. As such, Inject() and Extract() implementations that
|
||||||
|
// call the TextMapWriter and TextMapReader interfaces must agree on a
|
||||||
|
// prefix or other convention to distinguish their own key:value pairs.
|
||||||
|
Set(key, val string)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TextMapReader is the Extract() carrier for the TextMap builtin format. With it,
|
||||||
|
// the caller can decode a propagated SpanContext as entries in a map of
|
||||||
|
// unicode strings.
|
||||||
|
type TextMapReader interface {
|
||||||
|
// ForeachKey returns TextMap contents via repeated calls to the `handler`
|
||||||
|
// function. If any call to `handler` returns a non-nil error, ForeachKey
|
||||||
|
// terminates and returns that error.
|
||||||
|
//
|
||||||
|
// NOTE: The backing store for the TextMapReader may contain data unrelated
|
||||||
|
// to SpanContext. As such, Inject() and Extract() implementations that
|
||||||
|
// call the TextMapWriter and TextMapReader interfaces must agree on a
|
||||||
|
// prefix or other convention to distinguish their own key:value pairs.
|
||||||
|
//
|
||||||
|
// The "foreach" callback pattern reduces unnecessary copying in some cases
|
||||||
|
// and also allows implementations to hold locks while the map is read.
|
||||||
|
ForeachKey(handler func(key, val string) error) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// TextMapCarrier allows the use of regular map[string]string
|
||||||
|
// as both TextMapWriter and TextMapReader.
|
||||||
|
type TextMapCarrier map[string]string
|
||||||
|
|
||||||
|
// ForeachKey conforms to the TextMapReader interface.
|
||||||
|
func (c TextMapCarrier) ForeachKey(handler func(key, val string) error) error {
|
||||||
|
for k, v := range c {
|
||||||
|
if err := handler(k, v); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set implements Set() of opentracing.TextMapWriter
|
||||||
|
func (c TextMapCarrier) Set(key, val string) {
|
||||||
|
c[key] = val
|
||||||
|
}
|
||||||
|
|
||||||
|
// HTTPHeadersCarrier satisfies both TextMapWriter and TextMapReader.
|
||||||
|
//
|
||||||
|
// Example usage for server side:
|
||||||
|
//
|
||||||
|
// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
|
||||||
|
// clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier)
|
||||||
|
//
|
||||||
|
// Example usage for client side:
|
||||||
|
//
|
||||||
|
// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
|
||||||
|
// err := tracer.Inject(
|
||||||
|
// span.Context(),
|
||||||
|
// opentracing.HTTPHeaders,
|
||||||
|
// carrier)
|
||||||
|
//
|
||||||
|
type HTTPHeadersCarrier http.Header
|
||||||
|
|
||||||
|
// Set conforms to the TextMapWriter interface.
|
||||||
|
func (c HTTPHeadersCarrier) Set(key, val string) {
|
||||||
|
h := http.Header(c)
|
||||||
|
h.Set(key, val)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForeachKey conforms to the TextMapReader interface.
|
||||||
|
func (c HTTPHeadersCarrier) ForeachKey(handler func(key, val string) error) error {
|
||||||
|
for k, vals := range c {
|
||||||
|
for _, v := range vals {
|
||||||
|
if err := handler(k, v); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
189
src/vendor/github.com/opentracing/opentracing-go/span.go
generated
vendored
Normal file
189
src/vendor/github.com/opentracing/opentracing-go/span.go
generated
vendored
Normal file
@ -0,0 +1,189 @@
|
|||||||
|
package opentracing
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/opentracing/opentracing-go/log"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SpanContext represents Span state that must propagate to descendant Spans and across process
|
||||||
|
// boundaries (e.g., a <trace_id, span_id, sampled> tuple).
|
||||||
|
type SpanContext interface {
|
||||||
|
// ForeachBaggageItem grants access to all baggage items stored in the
|
||||||
|
// SpanContext.
|
||||||
|
// The handler function will be called for each baggage key/value pair.
|
||||||
|
// The ordering of items is not guaranteed.
|
||||||
|
//
|
||||||
|
// The bool return value indicates if the handler wants to continue iterating
|
||||||
|
// through the rest of the baggage items; for example if the handler is trying to
|
||||||
|
// find some baggage item by pattern matching the name, it can return false
|
||||||
|
// as soon as the item is found to stop further iterations.
|
||||||
|
ForeachBaggageItem(handler func(k, v string) bool)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Span represents an active, un-finished span in the OpenTracing system.
|
||||||
|
//
|
||||||
|
// Spans are created by the Tracer interface.
|
||||||
|
type Span interface {
|
||||||
|
// Sets the end timestamp and finalizes Span state.
|
||||||
|
//
|
||||||
|
// With the exception of calls to Context() (which are always allowed),
|
||||||
|
// Finish() must be the last call made to any span instance, and to do
|
||||||
|
// otherwise leads to undefined behavior.
|
||||||
|
Finish()
|
||||||
|
// FinishWithOptions is like Finish() but with explicit control over
|
||||||
|
// timestamps and log data.
|
||||||
|
FinishWithOptions(opts FinishOptions)
|
||||||
|
|
||||||
|
// Context() yields the SpanContext for this Span. Note that the return
|
||||||
|
// value of Context() is still valid after a call to Span.Finish(), as is
|
||||||
|
// a call to Span.Context() after a call to Span.Finish().
|
||||||
|
Context() SpanContext
|
||||||
|
|
||||||
|
// Sets or changes the operation name.
|
||||||
|
//
|
||||||
|
// Returns a reference to this Span for chaining.
|
||||||
|
SetOperationName(operationName string) Span
|
||||||
|
|
||||||
|
// Adds a tag to the span.
|
||||||
|
//
|
||||||
|
// If there is a pre-existing tag set for `key`, it is overwritten.
|
||||||
|
//
|
||||||
|
// Tag values can be numeric types, strings, or bools. The behavior of
|
||||||
|
// other tag value types is undefined at the OpenTracing level. If a
|
||||||
|
// tracing system does not know how to handle a particular value type, it
|
||||||
|
// may ignore the tag, but shall not panic.
|
||||||
|
//
|
||||||
|
// Returns a reference to this Span for chaining.
|
||||||
|
SetTag(key string, value interface{}) Span
|
||||||
|
|
||||||
|
// LogFields is an efficient and type-checked way to record key:value
|
||||||
|
// logging data about a Span, though the programming interface is a little
|
||||||
|
// more verbose than LogKV(). Here's an example:
|
||||||
|
//
|
||||||
|
// span.LogFields(
|
||||||
|
// log.String("event", "soft error"),
|
||||||
|
// log.String("type", "cache timeout"),
|
||||||
|
// log.Int("waited.millis", 1500))
|
||||||
|
//
|
||||||
|
// Also see Span.FinishWithOptions() and FinishOptions.BulkLogData.
|
||||||
|
LogFields(fields ...log.Field)
|
||||||
|
|
||||||
|
// LogKV is a concise, readable way to record key:value logging data about
|
||||||
|
// a Span, though unfortunately this also makes it less efficient and less
|
||||||
|
// type-safe than LogFields(). Here's an example:
|
||||||
|
//
|
||||||
|
// span.LogKV(
|
||||||
|
// "event", "soft error",
|
||||||
|
// "type", "cache timeout",
|
||||||
|
// "waited.millis", 1500)
|
||||||
|
//
|
||||||
|
// For LogKV (as opposed to LogFields()), the parameters must appear as
|
||||||
|
// key-value pairs, like
|
||||||
|
//
|
||||||
|
// span.LogKV(key1, val1, key2, val2, key3, val3, ...)
|
||||||
|
//
|
||||||
|
// The keys must all be strings. The values may be strings, numeric types,
|
||||||
|
// bools, Go error instances, or arbitrary structs.
|
||||||
|
//
|
||||||
|
// (Note to implementors: consider the log.InterleavedKVToFields() helper)
|
||||||
|
LogKV(alternatingKeyValues ...interface{})
|
||||||
|
|
||||||
|
// SetBaggageItem sets a key:value pair on this Span and its SpanContext
|
||||||
|
// that also propagates to descendants of this Span.
|
||||||
|
//
|
||||||
|
// SetBaggageItem() enables powerful functionality given a full-stack
|
||||||
|
// opentracing integration (e.g., arbitrary application data from a mobile
|
||||||
|
// app can make it, transparently, all the way into the depths of a storage
|
||||||
|
// system), and with it some powerful costs: use this feature with care.
|
||||||
|
//
|
||||||
|
// IMPORTANT NOTE #1: SetBaggageItem() will only propagate baggage items to
|
||||||
|
// *future* causal descendants of the associated Span.
|
||||||
|
//
|
||||||
|
// IMPORTANT NOTE #2: Use this thoughtfully and with care. Every key and
|
||||||
|
// value is copied into every local *and remote* child of the associated
|
||||||
|
// Span, and that can add up to a lot of network and cpu overhead.
|
||||||
|
//
|
||||||
|
// Returns a reference to this Span for chaining.
|
||||||
|
SetBaggageItem(restrictedKey, value string) Span
|
||||||
|
|
||||||
|
// Gets the value for a baggage item given its key. Returns the empty string
|
||||||
|
// if the value isn't found in this Span.
|
||||||
|
BaggageItem(restrictedKey string) string
|
||||||
|
|
||||||
|
// Provides access to the Tracer that created this Span.
|
||||||
|
Tracer() Tracer
|
||||||
|
|
||||||
|
// Deprecated: use LogFields or LogKV
|
||||||
|
LogEvent(event string)
|
||||||
|
// Deprecated: use LogFields or LogKV
|
||||||
|
LogEventWithPayload(event string, payload interface{})
|
||||||
|
// Deprecated: use LogFields or LogKV
|
||||||
|
Log(data LogData)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LogRecord is data associated with a single Span log. Every LogRecord
|
||||||
|
// instance must specify at least one Field.
|
||||||
|
type LogRecord struct {
|
||||||
|
Timestamp time.Time
|
||||||
|
Fields []log.Field
|
||||||
|
}
|
||||||
|
|
||||||
|
// FinishOptions allows Span.FinishWithOptions callers to override the finish
|
||||||
|
// timestamp and provide log data via a bulk interface.
|
||||||
|
type FinishOptions struct {
|
||||||
|
// FinishTime overrides the Span's finish time, or implicitly becomes
|
||||||
|
// time.Now() if FinishTime.IsZero().
|
||||||
|
//
|
||||||
|
// FinishTime must resolve to a timestamp that's >= the Span's StartTime
|
||||||
|
// (per StartSpanOptions).
|
||||||
|
FinishTime time.Time
|
||||||
|
|
||||||
|
// LogRecords allows the caller to specify the contents of many LogFields()
|
||||||
|
// calls with a single slice. May be nil.
|
||||||
|
//
|
||||||
|
// None of the LogRecord.Timestamp values may be .IsZero() (i.e., they must
|
||||||
|
// be set explicitly). Also, they must be >= the Span's start timestamp and
|
||||||
|
// <= the FinishTime (or time.Now() if FinishTime.IsZero()). Otherwise the
|
||||||
|
// behavior of FinishWithOptions() is undefined.
|
||||||
|
//
|
||||||
|
// If specified, the caller hands off ownership of LogRecords at
|
||||||
|
// FinishWithOptions() invocation time.
|
||||||
|
//
|
||||||
|
// If specified, the (deprecated) BulkLogData must be nil or empty.
|
||||||
|
LogRecords []LogRecord
|
||||||
|
|
||||||
|
// BulkLogData is DEPRECATED.
|
||||||
|
BulkLogData []LogData
|
||||||
|
}
|
||||||
|
|
||||||
|
// LogData is DEPRECATED
|
||||||
|
type LogData struct {
|
||||||
|
Timestamp time.Time
|
||||||
|
Event string
|
||||||
|
Payload interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToLogRecord converts a deprecated LogData to a non-deprecated LogRecord
|
||||||
|
func (ld *LogData) ToLogRecord() LogRecord {
|
||||||
|
var literalTimestamp time.Time
|
||||||
|
if ld.Timestamp.IsZero() {
|
||||||
|
literalTimestamp = time.Now()
|
||||||
|
} else {
|
||||||
|
literalTimestamp = ld.Timestamp
|
||||||
|
}
|
||||||
|
rval := LogRecord{
|
||||||
|
Timestamp: literalTimestamp,
|
||||||
|
}
|
||||||
|
if ld.Payload == nil {
|
||||||
|
rval.Fields = []log.Field{
|
||||||
|
log.String("event", ld.Event),
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
rval.Fields = []log.Field{
|
||||||
|
log.String("event", ld.Event),
|
||||||
|
log.Object("payload", ld.Payload),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return rval
|
||||||
|
}
|
304
src/vendor/github.com/opentracing/opentracing-go/tracer.go
generated
vendored
Normal file
304
src/vendor/github.com/opentracing/opentracing-go/tracer.go
generated
vendored
Normal file
@ -0,0 +1,304 @@
|
|||||||
|
package opentracing
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
// Tracer is a simple, thin interface for Span creation and SpanContext
|
||||||
|
// propagation.
|
||||||
|
type Tracer interface {
|
||||||
|
|
||||||
|
// Create, start, and return a new Span with the given `operationName` and
|
||||||
|
// incorporate the given StartSpanOption `opts`. (Note that `opts` borrows
|
||||||
|
// from the "functional options" pattern, per
|
||||||
|
// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis)
|
||||||
|
//
|
||||||
|
// A Span with no SpanReference options (e.g., opentracing.ChildOf() or
|
||||||
|
// opentracing.FollowsFrom()) becomes the root of its own trace.
|
||||||
|
//
|
||||||
|
// Examples:
|
||||||
|
//
|
||||||
|
// var tracer opentracing.Tracer = ...
|
||||||
|
//
|
||||||
|
// // The root-span case:
|
||||||
|
// sp := tracer.StartSpan("GetFeed")
|
||||||
|
//
|
||||||
|
// // The vanilla child span case:
|
||||||
|
// sp := tracer.StartSpan(
|
||||||
|
// "GetFeed",
|
||||||
|
// opentracing.ChildOf(parentSpan.Context()))
|
||||||
|
//
|
||||||
|
// // All the bells and whistles:
|
||||||
|
// sp := tracer.StartSpan(
|
||||||
|
// "GetFeed",
|
||||||
|
// opentracing.ChildOf(parentSpan.Context()),
|
||||||
|
// opentracing.Tag{"user_agent", loggedReq.UserAgent},
|
||||||
|
// opentracing.StartTime(loggedReq.Timestamp),
|
||||||
|
// )
|
||||||
|
//
|
||||||
|
StartSpan(operationName string, opts ...StartSpanOption) Span
|
||||||
|
|
||||||
|
// Inject() takes the `sm` SpanContext instance and injects it for
|
||||||
|
// propagation within `carrier`. The actual type of `carrier` depends on
|
||||||
|
// the value of `format`.
|
||||||
|
//
|
||||||
|
// OpenTracing defines a common set of `format` values (see BuiltinFormat),
|
||||||
|
// and each has an expected carrier type.
|
||||||
|
//
|
||||||
|
// Other packages may declare their own `format` values, much like the keys
|
||||||
|
// used by `context.Context` (see https://godoc.org/context#WithValue).
|
||||||
|
//
|
||||||
|
// Example usage (sans error handling):
|
||||||
|
//
|
||||||
|
// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
|
||||||
|
// err := tracer.Inject(
|
||||||
|
// span.Context(),
|
||||||
|
// opentracing.HTTPHeaders,
|
||||||
|
// carrier)
|
||||||
|
//
|
||||||
|
// NOTE: All opentracing.Tracer implementations MUST support all
|
||||||
|
// BuiltinFormats.
|
||||||
|
//
|
||||||
|
// Implementations may return opentracing.ErrUnsupportedFormat if `format`
|
||||||
|
// is not supported by (or not known by) the implementation.
|
||||||
|
//
|
||||||
|
// Implementations may return opentracing.ErrInvalidCarrier or any other
|
||||||
|
// implementation-specific error if the format is supported but injection
|
||||||
|
// fails anyway.
|
||||||
|
//
|
||||||
|
// See Tracer.Extract().
|
||||||
|
Inject(sm SpanContext, format interface{}, carrier interface{}) error
|
||||||
|
|
||||||
|
// Extract() returns a SpanContext instance given `format` and `carrier`.
|
||||||
|
//
|
||||||
|
// OpenTracing defines a common set of `format` values (see BuiltinFormat),
|
||||||
|
// and each has an expected carrier type.
|
||||||
|
//
|
||||||
|
// Other packages may declare their own `format` values, much like the keys
|
||||||
|
// used by `context.Context` (see
|
||||||
|
// https://godoc.org/golang.org/x/net/context#WithValue).
|
||||||
|
//
|
||||||
|
// Example usage (with StartSpan):
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
|
||||||
|
// clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier)
|
||||||
|
//
|
||||||
|
// // ... assuming the ultimate goal here is to resume the trace with a
|
||||||
|
// // server-side Span:
|
||||||
|
// var serverSpan opentracing.Span
|
||||||
|
// if err == nil {
|
||||||
|
// span = tracer.StartSpan(
|
||||||
|
// rpcMethodName, ext.RPCServerOption(clientContext))
|
||||||
|
// } else {
|
||||||
|
// span = tracer.StartSpan(rpcMethodName)
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// NOTE: All opentracing.Tracer implementations MUST support all
|
||||||
|
// BuiltinFormats.
|
||||||
|
//
|
||||||
|
// Return values:
|
||||||
|
// - A successful Extract returns a SpanContext instance and a nil error
|
||||||
|
// - If there was simply no SpanContext to extract in `carrier`, Extract()
|
||||||
|
// returns (nil, opentracing.ErrSpanContextNotFound)
|
||||||
|
// - If `format` is unsupported or unrecognized, Extract() returns (nil,
|
||||||
|
// opentracing.ErrUnsupportedFormat)
|
||||||
|
// - If there are more fundamental problems with the `carrier` object,
|
||||||
|
// Extract() may return opentracing.ErrInvalidCarrier,
|
||||||
|
// opentracing.ErrSpanContextCorrupted, or implementation-specific
|
||||||
|
// errors.
|
||||||
|
//
|
||||||
|
// See Tracer.Inject().
|
||||||
|
Extract(format interface{}, carrier interface{}) (SpanContext, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartSpanOptions allows Tracer.StartSpan() callers and implementors a
|
||||||
|
// mechanism to override the start timestamp, specify Span References, and make
|
||||||
|
// a single Tag or multiple Tags available at Span start time.
|
||||||
|
//
|
||||||
|
// StartSpan() callers should look at the StartSpanOption interface and
|
||||||
|
// implementations available in this package.
|
||||||
|
//
|
||||||
|
// Tracer implementations can convert a slice of `StartSpanOption` instances
|
||||||
|
// into a `StartSpanOptions` struct like so:
|
||||||
|
//
|
||||||
|
// func StartSpan(opName string, opts ...opentracing.StartSpanOption) {
|
||||||
|
// sso := opentracing.StartSpanOptions{}
|
||||||
|
// for _, o := range opts {
|
||||||
|
// o.Apply(&sso)
|
||||||
|
// }
|
||||||
|
// ...
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
type StartSpanOptions struct {
|
||||||
|
// Zero or more causal references to other Spans (via their SpanContext).
|
||||||
|
// If empty, start a "root" Span (i.e., start a new trace).
|
||||||
|
References []SpanReference
|
||||||
|
|
||||||
|
// StartTime overrides the Span's start time, or implicitly becomes
|
||||||
|
// time.Now() if StartTime.IsZero().
|
||||||
|
StartTime time.Time
|
||||||
|
|
||||||
|
// Tags may have zero or more entries; the restrictions on map values are
|
||||||
|
// identical to those for Span.SetTag(). May be nil.
|
||||||
|
//
|
||||||
|
// If specified, the caller hands off ownership of Tags at
|
||||||
|
// StartSpan() invocation time.
|
||||||
|
Tags map[string]interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartSpanOption instances (zero or more) may be passed to Tracer.StartSpan.
|
||||||
|
//
|
||||||
|
// StartSpanOption borrows from the "functional options" pattern, per
|
||||||
|
// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
|
||||||
|
type StartSpanOption interface {
|
||||||
|
Apply(*StartSpanOptions)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SpanReferenceType is an enum type describing different categories of
|
||||||
|
// relationships between two Spans. If Span-2 refers to Span-1, the
|
||||||
|
// SpanReferenceType describes Span-1 from Span-2's perspective. For example,
|
||||||
|
// ChildOfRef means that Span-1 created Span-2.
|
||||||
|
//
|
||||||
|
// NOTE: Span-1 and Span-2 do *not* necessarily depend on each other for
|
||||||
|
// completion; e.g., Span-2 may be part of a background job enqueued by Span-1,
|
||||||
|
// or Span-2 may be sitting in a distributed queue behind Span-1.
|
||||||
|
type SpanReferenceType int
|
||||||
|
|
||||||
|
const (
|
||||||
|
// ChildOfRef refers to a parent Span that caused *and* somehow depends
|
||||||
|
// upon the new child Span. Often (but not always), the parent Span cannot
|
||||||
|
// finish until the child Span does.
|
||||||
|
//
|
||||||
|
// An timing diagram for a ChildOfRef that's blocked on the new Span:
|
||||||
|
//
|
||||||
|
// [-Parent Span---------]
|
||||||
|
// [-Child Span----]
|
||||||
|
//
|
||||||
|
// See http://opentracing.io/spec/
|
||||||
|
//
|
||||||
|
// See opentracing.ChildOf()
|
||||||
|
ChildOfRef SpanReferenceType = iota
|
||||||
|
|
||||||
|
// FollowsFromRef refers to a parent Span that does not depend in any way
|
||||||
|
// on the result of the new child Span. For instance, one might use
|
||||||
|
// FollowsFromRefs to describe pipeline stages separated by queues,
|
||||||
|
// or a fire-and-forget cache insert at the tail end of a web request.
|
||||||
|
//
|
||||||
|
// A FollowsFromRef Span is part of the same logical trace as the new Span:
|
||||||
|
// i.e., the new Span is somehow caused by the work of its FollowsFromRef.
|
||||||
|
//
|
||||||
|
// All of the following could be valid timing diagrams for children that
|
||||||
|
// "FollowFrom" a parent.
|
||||||
|
//
|
||||||
|
// [-Parent Span-] [-Child Span-]
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// [-Parent Span--]
|
||||||
|
// [-Child Span-]
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// [-Parent Span-]
|
||||||
|
// [-Child Span-]
|
||||||
|
//
|
||||||
|
// See http://opentracing.io/spec/
|
||||||
|
//
|
||||||
|
// See opentracing.FollowsFrom()
|
||||||
|
FollowsFromRef
|
||||||
|
)
|
||||||
|
|
||||||
|
// SpanReference is a StartSpanOption that pairs a SpanReferenceType and a
|
||||||
|
// referenced SpanContext. See the SpanReferenceType documentation for
|
||||||
|
// supported relationships. If SpanReference is created with
|
||||||
|
// ReferencedContext==nil, it has no effect. Thus it allows for a more concise
|
||||||
|
// syntax for starting spans:
|
||||||
|
//
|
||||||
|
// sc, _ := tracer.Extract(someFormat, someCarrier)
|
||||||
|
// span := tracer.StartSpan("operation", opentracing.ChildOf(sc))
|
||||||
|
//
|
||||||
|
// The `ChildOf(sc)` option above will not panic if sc == nil, it will just
|
||||||
|
// not add the parent span reference to the options.
|
||||||
|
type SpanReference struct {
|
||||||
|
Type SpanReferenceType
|
||||||
|
ReferencedContext SpanContext
|
||||||
|
}
|
||||||
|
|
||||||
|
// Apply satisfies the StartSpanOption interface.
|
||||||
|
func (r SpanReference) Apply(o *StartSpanOptions) {
|
||||||
|
if r.ReferencedContext != nil {
|
||||||
|
o.References = append(o.References, r)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChildOf returns a StartSpanOption pointing to a dependent parent span.
|
||||||
|
// If sc == nil, the option has no effect.
|
||||||
|
//
|
||||||
|
// See ChildOfRef, SpanReference
|
||||||
|
func ChildOf(sc SpanContext) SpanReference {
|
||||||
|
return SpanReference{
|
||||||
|
Type: ChildOfRef,
|
||||||
|
ReferencedContext: sc,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// FollowsFrom returns a StartSpanOption pointing to a parent Span that caused
|
||||||
|
// the child Span but does not directly depend on its result in any way.
|
||||||
|
// If sc == nil, the option has no effect.
|
||||||
|
//
|
||||||
|
// See FollowsFromRef, SpanReference
|
||||||
|
func FollowsFrom(sc SpanContext) SpanReference {
|
||||||
|
return SpanReference{
|
||||||
|
Type: FollowsFromRef,
|
||||||
|
ReferencedContext: sc,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartTime is a StartSpanOption that sets an explicit start timestamp for the
|
||||||
|
// new Span.
|
||||||
|
type StartTime time.Time
|
||||||
|
|
||||||
|
// Apply satisfies the StartSpanOption interface.
|
||||||
|
func (t StartTime) Apply(o *StartSpanOptions) {
|
||||||
|
o.StartTime = time.Time(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tags are a generic map from an arbitrary string key to an opaque value type.
|
||||||
|
// The underlying tracing system is responsible for interpreting and
|
||||||
|
// serializing the values.
|
||||||
|
type Tags map[string]interface{}
|
||||||
|
|
||||||
|
// Apply satisfies the StartSpanOption interface.
|
||||||
|
func (t Tags) Apply(o *StartSpanOptions) {
|
||||||
|
if o.Tags == nil {
|
||||||
|
o.Tags = make(map[string]interface{})
|
||||||
|
}
|
||||||
|
for k, v := range t {
|
||||||
|
o.Tags[k] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tag may be passed as a StartSpanOption to add a tag to new spans,
|
||||||
|
// or its Set method may be used to apply the tag to an existing Span,
|
||||||
|
// for example:
|
||||||
|
//
|
||||||
|
// tracer.StartSpan("opName", Tag{"Key", value})
|
||||||
|
//
|
||||||
|
// or
|
||||||
|
//
|
||||||
|
// Tag{"key", value}.Set(span)
|
||||||
|
type Tag struct {
|
||||||
|
Key string
|
||||||
|
Value interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Apply satisfies the StartSpanOption interface.
|
||||||
|
func (t Tag) Apply(o *StartSpanOptions) {
|
||||||
|
if o.Tags == nil {
|
||||||
|
o.Tags = make(map[string]interface{})
|
||||||
|
}
|
||||||
|
o.Tags[t.Key] = t.Value
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set applies the tag to an existing Span.
|
||||||
|
func (t Tag) Set(s Span) {
|
||||||
|
s.SetTag(t.Key, t.Value)
|
||||||
|
}
|
42
src/vendor/modules.txt
vendored
42
src/vendor/modules.txt
vendored
@ -24,6 +24,39 @@ github.com/astaxie/beego/logs
|
|||||||
github.com/astaxie/beego/session
|
github.com/astaxie/beego/session
|
||||||
github.com/astaxie/beego/toolbox
|
github.com/astaxie/beego/toolbox
|
||||||
github.com/astaxie/beego/utils
|
github.com/astaxie/beego/utils
|
||||||
|
# github.com/aws/aws-sdk-go v1.19.47
|
||||||
|
github.com/aws/aws-sdk-go/aws
|
||||||
|
github.com/aws/aws-sdk-go/aws/awserr
|
||||||
|
github.com/aws/aws-sdk-go/aws/credentials
|
||||||
|
github.com/aws/aws-sdk-go/aws/session
|
||||||
|
github.com/aws/aws-sdk-go/service/ecr
|
||||||
|
github.com/aws/aws-sdk-go/aws/endpoints
|
||||||
|
github.com/aws/aws-sdk-go/internal/sdkio
|
||||||
|
github.com/aws/aws-sdk-go/internal/ini
|
||||||
|
github.com/aws/aws-sdk-go/internal/shareddefaults
|
||||||
|
github.com/aws/aws-sdk-go/aws/client
|
||||||
|
github.com/aws/aws-sdk-go/aws/corehandlers
|
||||||
|
github.com/aws/aws-sdk-go/aws/credentials/processcreds
|
||||||
|
github.com/aws/aws-sdk-go/aws/credentials/stscreds
|
||||||
|
github.com/aws/aws-sdk-go/aws/csm
|
||||||
|
github.com/aws/aws-sdk-go/aws/defaults
|
||||||
|
github.com/aws/aws-sdk-go/aws/request
|
||||||
|
github.com/aws/aws-sdk-go/aws/awsutil
|
||||||
|
github.com/aws/aws-sdk-go/aws/client/metadata
|
||||||
|
github.com/aws/aws-sdk-go/aws/signer/v4
|
||||||
|
github.com/aws/aws-sdk-go/private/protocol
|
||||||
|
github.com/aws/aws-sdk-go/private/protocol/jsonrpc
|
||||||
|
github.com/aws/aws-sdk-go/internal/sdkrand
|
||||||
|
github.com/aws/aws-sdk-go/service/sts
|
||||||
|
github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds
|
||||||
|
github.com/aws/aws-sdk-go/aws/credentials/endpointcreds
|
||||||
|
github.com/aws/aws-sdk-go/aws/ec2metadata
|
||||||
|
github.com/aws/aws-sdk-go/private/protocol/rest
|
||||||
|
github.com/aws/aws-sdk-go/private/protocol/json/jsonutil
|
||||||
|
github.com/aws/aws-sdk-go/private/protocol/query
|
||||||
|
github.com/aws/aws-sdk-go/internal/sdkuri
|
||||||
|
github.com/aws/aws-sdk-go/private/protocol/query/queryutil
|
||||||
|
github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil
|
||||||
# github.com/beego/i18n v0.0.0-20140604031826-e87155e8f0c0
|
# github.com/beego/i18n v0.0.0-20140604031826-e87155e8f0c0
|
||||||
github.com/beego/i18n
|
github.com/beego/i18n
|
||||||
# github.com/bmatcuk/doublestar v1.1.1
|
# github.com/bmatcuk/doublestar v1.1.1
|
||||||
@ -112,6 +145,10 @@ github.com/gorilla/context
|
|||||||
github.com/gorilla/handlers
|
github.com/gorilla/handlers
|
||||||
# github.com/gorilla/mux v1.6.2
|
# github.com/gorilla/mux v1.6.2
|
||||||
github.com/gorilla/mux
|
github.com/gorilla/mux
|
||||||
|
# github.com/graph-gophers/dataloader v5.0.0+incompatible
|
||||||
|
github.com/graph-gophers/dataloader
|
||||||
|
# github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af
|
||||||
|
github.com/jmespath/go-jmespath
|
||||||
# github.com/json-iterator/go v1.1.6
|
# github.com/json-iterator/go v1.1.6
|
||||||
github.com/json-iterator/go
|
github.com/json-iterator/go
|
||||||
# github.com/justinas/alice v0.0.0-20171023064455-03f45bd4b7da
|
# github.com/justinas/alice v0.0.0-20171023064455-03f45bd4b7da
|
||||||
@ -133,6 +170,9 @@ github.com/opencontainers/go-digest
|
|||||||
# github.com/opencontainers/image-spec v1.0.1
|
# github.com/opencontainers/image-spec v1.0.1
|
||||||
github.com/opencontainers/image-spec/specs-go/v1
|
github.com/opencontainers/image-spec/specs-go/v1
|
||||||
github.com/opencontainers/image-spec/specs-go
|
github.com/opencontainers/image-spec/specs-go
|
||||||
|
# github.com/opentracing/opentracing-go v1.1.0
|
||||||
|
github.com/opentracing/opentracing-go
|
||||||
|
github.com/opentracing/opentracing-go/log
|
||||||
# github.com/pkg/errors v0.8.1
|
# github.com/pkg/errors v0.8.1
|
||||||
github.com/pkg/errors
|
github.com/pkg/errors
|
||||||
# github.com/pmezard/go-difflib v1.0.0
|
# github.com/pmezard/go-difflib v1.0.0
|
||||||
@ -149,10 +189,10 @@ github.com/spf13/pflag
|
|||||||
# github.com/stretchr/objx v0.1.1
|
# github.com/stretchr/objx v0.1.1
|
||||||
github.com/stretchr/objx
|
github.com/stretchr/objx
|
||||||
# github.com/stretchr/testify v1.3.0
|
# github.com/stretchr/testify v1.3.0
|
||||||
|
github.com/stretchr/testify/mock
|
||||||
github.com/stretchr/testify/assert
|
github.com/stretchr/testify/assert
|
||||||
github.com/stretchr/testify/require
|
github.com/stretchr/testify/require
|
||||||
github.com/stretchr/testify/suite
|
github.com/stretchr/testify/suite
|
||||||
github.com/stretchr/testify/mock
|
|
||||||
# github.com/theupdateframework/notary v0.6.1
|
# github.com/theupdateframework/notary v0.6.1
|
||||||
github.com/theupdateframework/notary
|
github.com/theupdateframework/notary
|
||||||
github.com/theupdateframework/notary/client
|
github.com/theupdateframework/notary/client
|
||||||
|
Loading…
Reference in New Issue
Block a user