Merge branch 'master' into replication_gcr_1.9
Commit af548e915e
@@ -49,7 +49,7 @@ Harbor is hosted by the [Cloud Native Computing Foundation](https://cncf.io) (CNCF)

**System requirements:**

-**On a Linux host:** docker 17.03.0-ce+ and docker-compose 1.18.0+ .
+**On a Linux host:** docker 17.06.0-ce+ and docker-compose 1.18.0+ .

Download binaries of **[Harbor release](https://github.com/vmware/harbor/releases)** and follow **[Installation & Configuration Guide](docs/installation_guide.md)** to install Harbor.
@@ -113,17 +113,24 @@ Notice that you may need to trust the certificate at OS level. Please refer to t

**3) Configure Harbor**

-Edit the file ```harbor.cfg```, update the hostname and the protocol, and update the attributes ```ssl_cert``` and ```ssl_cert_key```:
+Edit the file `harbor.yml`, update the hostname and uncomment the https block, and update the attributes `certificate` and `private_key`:

+```yaml
+#set hostname
+hostname: yourdomain.com
+
+http:
+  port: 80
+
+https:
+  # https port for harbor, default is 443
+  port: 443
+  # The path of cert and key files for nginx
+  certificate: /data/cert/yourdomain.com.crt
+  private_key: /data/cert/yourdomain.com.key
+```
-```
-#set hostname
-hostname = yourdomain.com:port
-#set ui_url_protocol
-ui_url_protocol = https
-......
-#The path of cert and key files for nginx, they are applied only the protocol is set to https
-ssl_cert = /data/cert/yourdomain.com.crt
-ssl_cert_key = /data/cert/yourdomain.com.key
-```

Generate configuration files for Harbor:
@@ -30,7 +30,7 @@ Harbor is deployed as several Docker containers, and, therefore, can be deployed

|Software|Version|Description|
|---|---|---|
-|Docker engine|version 17.03.0-ce+ or higher|For installation instructions, please refer to: [docker engine doc](https://docs.docker.com/engine/installation/)|
+|Docker engine|version 17.06.0-ce+ or higher|For installation instructions, please refer to: [docker engine doc](https://docs.docker.com/engine/installation/)|
|Docker Compose|version 1.18.0 or higher|For installation instructions, please refer to: [docker compose doc](https://docs.docker.com/compose/install/)|
|Openssl|latest is preferred|Generate certificate and keys for Harbor|
@@ -2,7 +2,7 @@ swagger: '2.0'
info:
  title: Harbor API
  description: These APIs provide services for manipulating Harbor project.
-  version: 1.8.0
+  version: 1.9.0
host: localhost
schemes:
  - http

@@ -3478,6 +3478,44 @@ paths:
          description: The robot account is not found.
        '500':
          description: Unexpected internal errors.
  '/system/CVEWhitelist':
    get:
      summary: Get the system level whitelist of CVE.
      description: Get the system level whitelist of CVE. This API can be called by all authenticated users.
      tags:
        - Products
        - System
      responses:
        '200':
          description: Successfully retrieved the CVE whitelist.
          schema:
            $ref: "#/definitions/CVEWhitelist"
        '401':
          description: User is not authenticated.
        '500':
          description: Unexpected internal errors.
    put:
      summary: Update the system level whitelist of CVE.
      description: This API overwrites the system level whitelist of CVE with the list in request body. Only system Admin
        has permission to call this API.
      tags:
        - Products
        - System
      parameters:
        - in: body
          name: whitelist
          description: The whitelist with new content
          schema:
            $ref: "#/definitions/CVEWhitelist"
      responses:
        '200':
          description: Successfully updated the CVE whitelist.
        '401':
          description: User is not authenticated.
        '403':
          description: User does not have permission to call this API.
        '500':
          description: Unexpected internal errors.
responses:
  OK:
    description: 'Success'

@@ -5070,3 +5108,27 @@ definitions:
      metadata:
        type: object
        description: The metadata of namespace
  CVEWhitelist:
    type: object
    description: The CVE Whitelist for system or project
    properties:
      id:
        type: integer
        description: ID of the whitelist
      project_id:
        type: integer
        description: ID of the project which the whitelist belongs to. For system level whitelist this attribute is zero.
      expires_at:
        type: integer
        description: the time for expiration of the whitelist, in the form of seconds since epoch. This is an optional attribute, if it's not set the CVE whitelist does not expire.
      items:
        type: array
        items:
          $ref: "#/definitions/CVEWhitelistItem"
  CVEWhitelistItem:
    type: object
    description: The item in CVE whitelist
    properties:
      cve_id:
        type: string
        description: The ID of the CVE, such as "CVE-2019-10164"
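For orientation, a minimal sketch of exercising the new endpoint from a Go client follows. The Harbor host and the admin credentials are placeholder assumptions for illustration; the route, request body shape, and status codes come from the swagger definitions above.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// Mirrors the CVEWhitelist / CVEWhitelistItem definitions above.
type cveWhitelistItem struct {
	CVEID string `json:"cve_id"`
}

type cveWhitelist struct {
	ExpiresAt *int64             `json:"expires_at,omitempty"`
	Items     []cveWhitelistItem `json:"items"`
}

func main() {
	harborURL := "https://harbor.example.com" // assumption: your Harbor host
	expires := int64(1573254000)              // seconds since epoch; omit for a non-expiring whitelist
	payload, _ := json.Marshal(cveWhitelist{
		ExpiresAt: &expires,
		Items:     []cveWhitelistItem{{CVEID: "CVE-2019-10164"}},
	})

	// PUT overwrites the system level whitelist; only a system admin may call it.
	req, _ := http.NewRequest(http.MethodPut, harborURL+"/api/system/CVEWhitelist", bytes.NewReader(payload))
	req.Header.Set("Content-Type", "application/json")
	req.SetBasicAuth("admin", "Harbor12345") // assumption: admin credentials

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.StatusCode) // 200 on success, 403 for non-admins
}
```

A GET against the same route returns the current whitelist and, per the spec above, is open to any authenticated user.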
make/migrations/postgresql/0010_1.9.0_schema.up.sql (new file, 10 lines)
@@ -0,0 +1,10 @@
/* add table for CVE whitelist */
CREATE TABLE cve_whitelist (
  id            SERIAL PRIMARY KEY NOT NULL,
  project_id    int,
  creation_time timestamp default CURRENT_TIMESTAMP,
  update_time   timestamp default CURRENT_TIMESTAMP,
  expires_at    bigint,
  items         text NOT NULL,
  UNIQUE (project_id)
);
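A note on the schema: whitelist items are not normalized into a child table. They are JSON-encoded into the items text column, with one row per project (hence UNIQUE (project_id)) and project_id 0 reserved for the system level list. A rough sketch of that serialization, reusing the models package added later in this commit:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/goharbor/harbor/src/common/models"
)

func main() {
	// The DAO layer (src/common/dao/cve_whitelist.go below) persists the item
	// slice as JSON text in the items column before insert/update.
	items := []models.CVEWhitelistItem{{CVEID: "CVE-2019-10164"}}
	b, _ := json.Marshal(items)
	fmt.Println(string(b)) // [{"cve_id":"CVE-2019-10164"}] — the value stored in cve_whitelist.items
}
```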
@@ -8,5 +8,6 @@ RUN mkdir /harbor/ \
COPY ./make/photon/jobservice/start.sh ./make/photon/jobservice/harbor_jobservice /harbor/

RUN chmod u+x /harbor/harbor_jobservice /harbor/start.sh
+RUN mkdir -p /var/log/jobs
WORKDIR /harbor/
ENTRYPOINT ["/harbor/start.sh"]
src/common/dao/cve_whitelist.go (new file, 74 lines)
@@ -0,0 +1,74 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package dao
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/goharbor/harbor/src/common/models"
|
||||
"github.com/goharbor/harbor/src/common/utils/log"
|
||||
)
|
||||
|
||||
// UpdateCVEWhitelist Updates the vulnerability white list to DB
|
||||
func UpdateCVEWhitelist(l models.CVEWhitelist) (int64, error) {
|
||||
o := GetOrmer()
|
||||
itemsBytes, _ := json.Marshal(l.Items)
|
||||
l.ItemsText = string(itemsBytes)
|
||||
id, err := o.InsertOrUpdate(&l, "project_id")
|
||||
return id, err
|
||||
}
|
||||
|
||||
// GetSysCVEWhitelist Gets the system level vulnerability white list from DB
|
||||
func GetSysCVEWhitelist() (*models.CVEWhitelist, error) {
|
||||
return GetCVEWhitelist(0)
|
||||
}
|
||||
|
||||
// UpdateSysCVEWhitelist updates the system level CVE whitelist
|
||||
/*
|
||||
func UpdateSysCVEWhitelist(l models.CVEWhitelist) error {
|
||||
if l.ProjectID != 0 {
|
||||
return fmt.Errorf("system level CVE whitelist cannot set project ID")
|
||||
}
|
||||
l.ProjectID = -1
|
||||
_, err := UpdateCVEWhitelist(l)
|
||||
return err
|
||||
}
|
||||
*/
|
||||
|
||||
// GetCVEWhitelist Gets the CVE whitelist of the project based on the project ID in parameter
|
||||
func GetCVEWhitelist(pid int64) (*models.CVEWhitelist, error) {
|
||||
o := GetOrmer()
|
||||
qs := o.QueryTable(&models.CVEWhitelist{})
|
||||
qs = qs.Filter("ProjectID", pid)
|
||||
r := []*models.CVEWhitelist{}
|
||||
_, err := qs.All(&r)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get CVE whitelist for project %d, error: %v", pid, err)
|
||||
}
|
||||
if len(r) == 0 {
|
||||
log.Infof("No CVE whitelist found for project %d, returning empty list.", pid)
|
||||
return &models.CVEWhitelist{ProjectID: pid, Items: []models.CVEWhitelistItem{}}, nil
|
||||
} else if len(r) > 1 {
|
||||
log.Infof("Multiple CVE whitelists found for project %d, length: %d, returning first element.", pid, len(r))
|
||||
}
|
||||
items := []models.CVEWhitelistItem{}
|
||||
err = json.Unmarshal([]byte(r[0].ItemsText), &items)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to decode item list, err: %v, text: %s", err, r[0].ItemsText)
|
||||
return nil, err
|
||||
}
|
||||
r[0].Items = items
|
||||
return r[0], nil
|
||||
}
|
src/common/dao/cve_whitelist_test.go (new file, 72 lines)
@@ -0,0 +1,72 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package dao
|
||||
|
||||
import (
|
||||
"github.com/goharbor/harbor/src/common/models"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestUpdateAndGetCVEWhitelist(t *testing.T) {
|
||||
require.Nil(t, ClearTable("cve_whitelist"))
|
||||
l, err := GetSysCVEWhitelist()
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, models.CVEWhitelist{ProjectID: 0, Items: []models.CVEWhitelistItem{}}, *l)
|
||||
l2, err := GetCVEWhitelist(5)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, models.CVEWhitelist{ProjectID: 5, Items: []models.CVEWhitelistItem{}}, *l2)
|
||||
|
||||
longList := []models.CVEWhitelistItem{}
|
||||
for i := 0; i < 50; i++ {
|
||||
longList = append(longList, models.CVEWhitelistItem{CVEID: "CVE-1999-0067"})
|
||||
}
|
||||
|
||||
e := int64(1573254000)
|
||||
in1 := models.CVEWhitelist{ProjectID: 3, Items: longList, ExpiresAt: &e}
|
||||
_, err = UpdateCVEWhitelist(in1)
|
||||
require.Nil(t, err)
|
||||
// assert.Equal(t, int64(1), n)
|
||||
out1, err := GetCVEWhitelist(3)
|
||||
require.Nil(t, err)
|
||||
assert.Equal(t, int64(3), out1.ProjectID)
|
||||
assert.Equal(t, longList, out1.Items)
|
||||
assert.Equal(t, e, *out1.ExpiresAt)
|
||||
|
||||
in2 := models.CVEWhitelist{ProjectID: 3, Items: []models.CVEWhitelistItem{}}
|
||||
_, err = UpdateCVEWhitelist(in2)
|
||||
require.Nil(t, err)
|
||||
// assert.Equal(t, int64(1), n2)
|
||||
out2, err := GetCVEWhitelist(3)
|
||||
require.Nil(t, err)
|
||||
assert.Equal(t, int64(3), out2.ProjectID)
|
||||
assert.Equal(t, []models.CVEWhitelistItem{}, out2.Items)
|
||||
|
||||
sysCVEs := []models.CVEWhitelistItem{
|
||||
{CVEID: "CVE-2019-10164"},
|
||||
{CVEID: "CVE-2017-12345"},
|
||||
}
|
||||
in3 := models.CVEWhitelist{Items: sysCVEs}
|
||||
_, err = UpdateCVEWhitelist(in3)
|
||||
require.Nil(t, err)
|
||||
// assert.Equal(t, int64(1), n3)
|
||||
sysList, err := GetSysCVEWhitelist()
|
||||
require.Nil(t, err)
|
||||
assert.Equal(t, int64(0), sysList.ProjectID)
|
||||
assert.Equal(t, sysCVEs, sysList.Items)
|
||||
|
||||
// require.Nil(t, ClearTable("cve_whitelist"))
|
||||
}
|
@@ -36,5 +36,6 @@ func init() {
		new(AdminJob),
		new(JobLog),
		new(Robot),
-		new(OIDCUser))
+		new(OIDCUser),
+		new(CVEWhitelist))
}
src/common/models/cve_whitelist.go (new file, 38 lines)
@@ -0,0 +1,38 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package models
|
||||
|
||||
import "time"
|
||||
|
||||
// CVEWhitelist defines the data model for a CVE whitelist
|
||||
type CVEWhitelist struct {
|
||||
ID int64 `orm:"pk;auto;column(id)" json:"id"`
|
||||
ProjectID int64 `orm:"column(project_id)" json:"project_id"`
|
||||
ExpiresAt *int64 `orm:"column(expires_at)" json:"expires_at,omitempty"`
|
||||
Items []CVEWhitelistItem `orm:"-" json:"items"`
|
||||
ItemsText string `orm:"column(items)" json:"-"`
|
||||
CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"`
|
||||
UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time"`
|
||||
}
|
||||
|
||||
// CVEWhitelistItem defines one item in the CVE whitelist
|
||||
type CVEWhitelistItem struct {
|
||||
CVEID string `json:"cve_id"`
|
||||
}
|
||||
|
||||
// TableName ...
|
||||
func (r *CVEWhitelist) TableName() string {
|
||||
return "cve_whitelist"
|
||||
}
|
@@ -144,6 +144,7 @@ func init() {
	beego.Router("/api/system/gc/:id([0-9]+)/log", &GCAPI{}, "get:GetLog")
	beego.Router("/api/system/gc/schedule", &GCAPI{}, "get:Get;put:Put;post:Post")
	beego.Router("/api/system/scanAll/schedule", &ScanAllAPI{}, "get:Get;put:Put;post:Post")
+	beego.Router("/api/system/CVEWhitelist", &SysCVEWhitelistAPI{}, "get:Get;put:Put")

	beego.Router("/api/projects/:pid([0-9]+)/robots/", &RobotAPI{}, "post:Post;get:List")
	beego.Router("/api/projects/:pid([0-9]+)/robots/:id([0-9]+)", &RobotAPI{}, "get:Get;put:Put;delete:Delete")
@@ -49,6 +49,7 @@ func (t *RegistryAPI) Ping() {
		ID             *int64  `json:"id"`
		Type           *string `json:"type"`
		URL            *string `json:"url"`
		Region         *string `json:"region"`
		CredentialType *string `json:"credential_type"`
		AccessKey      *string `json:"access_key"`
		AccessSecret   *string `json:"access_secret"`
@@ -332,7 +332,7 @@ func (ra *RepositoryAPI) Delete() {

		go func(tag string) {
			e := &event.Event{
-				Type: event.EventTypeImagePush,
+				Type: event.EventTypeImageDelete,
				Resource: &model.Resource{
					Type: model.ResourceTypeImage,
					Metadata: &model.ResourceMetadata{
src/core/api/sys_cve_whitelist.go (new file, 74 lines)
@@ -0,0 +1,74 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/goharbor/harbor/src/common/dao"
|
||||
"github.com/goharbor/harbor/src/common/models"
|
||||
"github.com/goharbor/harbor/src/common/utils/log"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// SysCVEWhitelistAPI Handles the requests to manage system level CVE whitelist
|
||||
type SysCVEWhitelistAPI struct {
|
||||
BaseController
|
||||
}
|
||||
|
||||
// Prepare validates the request initially
|
||||
func (sca *SysCVEWhitelistAPI) Prepare() {
|
||||
sca.BaseController.Prepare()
|
||||
if !sca.SecurityCtx.IsAuthenticated() {
|
||||
sca.SendUnAuthorizedError(errors.New("Unauthorized"))
|
||||
return
|
||||
}
|
||||
if !sca.SecurityCtx.IsSysAdmin() && sca.Ctx.Request.Method != http.MethodGet {
|
||||
msg := fmt.Sprintf("only system admin has permission issue %s request to this API", sca.Ctx.Request.Method)
|
||||
log.Errorf(msg)
|
||||
sca.SendForbiddenError(errors.New(msg))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Get handles the GET request to retrieve the system level CVE whitelist
|
||||
func (sca *SysCVEWhitelistAPI) Get() {
|
||||
l, err := dao.GetSysCVEWhitelist()
|
||||
if err != nil {
|
||||
sca.SendInternalServerError(err)
|
||||
return
|
||||
}
|
||||
sca.WriteJSONData(l)
|
||||
}
|
||||
|
||||
// Put handles the PUT request to update the system level CVE whitelist
|
||||
func (sca *SysCVEWhitelistAPI) Put() {
|
||||
var l models.CVEWhitelist
|
||||
if err := sca.DecodeJSONReq(&l); err != nil {
|
||||
log.Errorf("Failed to decode JSON array from request")
|
||||
sca.SendBadRequestError(err)
|
||||
return
|
||||
}
|
||||
if l.ProjectID != 0 {
|
||||
msg := fmt.Sprintf("Non-zero project ID for system CVE whitelist: %d.", l.ProjectID)
|
||||
log.Error(msg)
|
||||
sca.SendBadRequestError(errors.New(msg))
|
||||
return
|
||||
}
|
||||
if _, err := dao.UpdateCVEWhitelist(l); err != nil {
|
||||
sca.SendInternalServerError(err)
|
||||
return
|
||||
}
|
||||
}
|
src/core/api/sys_cve_whitelist_test.go (new file, 110 lines)
@@ -0,0 +1,110 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
package api
|
||||
|
||||
import (
|
||||
"github.com/goharbor/harbor/src/common/models"
|
||||
"net/http"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestSysCVEWhitelistAPIGet(t *testing.T) {
|
||||
url := "/api/system/CVEWhitelist"
|
||||
cases := []*codeCheckingCase{
|
||||
// 401
|
||||
{
|
||||
request: &testingRequest{
|
||||
method: http.MethodGet,
|
||||
url: url,
|
||||
},
|
||||
code: http.StatusUnauthorized,
|
||||
},
|
||||
// 200
|
||||
{
|
||||
request: &testingRequest{
|
||||
method: http.MethodGet,
|
||||
url: url,
|
||||
credential: nonSysAdmin,
|
||||
},
|
||||
code: http.StatusOK,
|
||||
},
|
||||
}
|
||||
runCodeCheckingCases(t, cases...)
|
||||
}
|
||||
|
||||
func TestSysCVEWhitelistAPIPut(t *testing.T) {
|
||||
url := "/api/system/CVEWhitelist"
|
||||
s := int64(1573254000)
|
||||
cases := []*codeCheckingCase{
|
||||
// 401
|
||||
{
|
||||
request: &testingRequest{
|
||||
method: http.MethodPut,
|
||||
url: url,
|
||||
},
|
||||
code: http.StatusUnauthorized,
|
||||
},
|
||||
// 403
|
||||
{
|
||||
request: &testingRequest{
|
||||
method: http.MethodPut,
|
||||
url: url,
|
||||
credential: nonSysAdmin,
|
||||
},
|
||||
code: http.StatusForbidden,
|
||||
},
|
||||
// 400
|
||||
{
|
||||
request: &testingRequest{
|
||||
method: http.MethodPut,
|
||||
url: url,
|
||||
bodyJSON: []string{"CVE-1234-1234"},
|
||||
credential: sysAdmin,
|
||||
},
|
||||
code: http.StatusBadRequest,
|
||||
},
|
||||
// 400
|
||||
{
|
||||
request: &testingRequest{
|
||||
method: http.MethodPut,
|
||||
url: url,
|
||||
bodyJSON: models.CVEWhitelist{
|
||||
ExpiresAt: &s,
|
||||
Items: []models.CVEWhitelistItem{
|
||||
{CVEID: "CVE-2019-12310"},
|
||||
},
|
||||
ProjectID: 2,
|
||||
},
|
||||
credential: sysAdmin,
|
||||
},
|
||||
code: http.StatusBadRequest,
|
||||
},
|
||||
// 200
|
||||
{
|
||||
request: &testingRequest{
|
||||
method: http.MethodPut,
|
||||
url: url,
|
||||
bodyJSON: models.CVEWhitelist{
|
||||
ExpiresAt: &s,
|
||||
Items: []models.CVEWhitelistItem{
|
||||
{CVEID: "CVE-2019-12310"},
|
||||
},
|
||||
},
|
||||
credential: sysAdmin,
|
||||
},
|
||||
code: http.StatusOK,
|
||||
},
|
||||
}
|
||||
runCodeCheckingCases(t, cases...)
|
||||
}
|
@@ -96,6 +96,7 @@ func initRouters() {
	beego.Router("/api/system/gc/:id([0-9]+)/log", &api.GCAPI{}, "get:GetLog")
	beego.Router("/api/system/gc/schedule", &api.GCAPI{}, "get:Get;put:Put;post:Post")
	beego.Router("/api/system/scanAll/schedule", &api.ScanAllAPI{}, "get:Get;put:Put;post:Post")
+	beego.Router("/api/system/CVEWhitelist", &api.SysCVEWhitelistAPI{}, "get:Get;put:Put")

	beego.Router("/api/logs", &api.LogAPI{})
@@ -10,6 +10,7 @@ require (
	github.com/Unknwon/goconfig v0.0.0-20160216183935-5f601ca6ef4d // indirect
	github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 // indirect
	github.com/astaxie/beego v1.9.0
+	github.com/aws/aws-sdk-go v1.19.47
	github.com/beego/i18n v0.0.0-20140604031826-e87155e8f0c0
	github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 // indirect
	github.com/bitly/go-simplejson v0.5.0 // indirect

@@ -25,6 +25,8 @@ github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/astaxie/beego v1.9.0 h1:tPzS+D1oCLi+SEb/TLNRNYpCjaMVfAGoy9OTLwS5ul4=
github.com/astaxie/beego v1.9.0/go.mod h1:0R4++1tUqERR0WYFWdfkcrsyoVBCG4DgpDGokT3yb+U=
+github.com/aws/aws-sdk-go v1.19.47 h1:ZEze0mpk8Fttrsz6UNLqhH/jRGYbMPfWFA2ILas4AmM=
+github.com/aws/aws-sdk-go v1.19.47/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/beego/i18n v0.0.0-20140604031826-e87155e8f0c0 h1:fQaDnUQvBXHHQdGBu9hz8nPznB4BeiPQokvmQVjmNEw=
github.com/beego/i18n v0.0.0-20140604031826-e87155e8f0c0/go.mod h1:KLeFCpAMq2+50NkXC8iiJxLLiiTfTqrGtKEVm+2fk7s=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=

@@ -158,6 +160,8 @@ github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a h1:eeaG9XMUvRBYX
github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.0.0 h1:6WV8LvwPpDhKjo5U9O6b4+xdG/jTXNPwlDme/MTo8Ns=
github.com/jinzhu/now v1.0.0/go.mod h1:oHTiXerJ20+SfYcrdlBO7rzZRJWGwSTQ0iUY2jI6Gfc=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
@@ -36,6 +36,8 @@ import (
	_ "github.com/goharbor/harbor/src/replication/adapter/huawei"
	// register the Google Gcr adapter
	_ "github.com/goharbor/harbor/src/replication/adapter/googlegcr"
+	// register the AwsEcr adapter
+	_ "github.com/goharbor/harbor/src/replication/adapter/awsecr"
)

// Replication implements the job interface
@ -188,11 +188,23 @@ func (suite *CWorkerTestSuite) TestStopJob() {
|
||||
t, err := suite.lcmCtl.New(genericJob)
|
||||
require.NoError(suite.T(), err, "new job stats: nil error expected but got %s", err)
|
||||
|
||||
time.Sleep(3 * time.Second)
|
||||
tk := time.NewTicker(500 * time.Millisecond)
|
||||
defer tk.Stop()
|
||||
|
||||
LOOP:
|
||||
for {
|
||||
select {
|
||||
case <-tk.C:
|
||||
latest, err := t.Status()
|
||||
require.NoError(suite.T(), err, "get latest status: nil error expected but got %s", err)
|
||||
assert.EqualValues(suite.T(), job.RunningStatus, latest, "expect job is running now")
|
||||
if latest.Compare(job.RunningStatus) == 0 {
|
||||
break LOOP
|
||||
}
|
||||
case <-time.After(30 * time.Second):
|
||||
require.NoError(suite.T(), errors.New("check running status time out"))
|
||||
break LOOP
|
||||
}
|
||||
}
|
||||
|
||||
err = suite.cWorker.StopJob(genericJob.Info.JobID)
|
||||
require.NoError(suite.T(), err, "stop job: nil error expected but got %s", err)
|
||||
@ -255,7 +267,7 @@ func (j *fakeLongRunJob) Validate(params job.Parameters) error {
|
||||
}
|
||||
|
||||
func (j *fakeLongRunJob) Run(ctx job.Context, params job.Parameters) error {
|
||||
time.Sleep(5 * time.Second)
|
||||
time.Sleep(3 * time.Second)
|
||||
|
||||
if _, stopped := ctx.OPCommand(); stopped {
|
||||
return nil
|
||||
|
@ -48,6 +48,7 @@
|
||||
<div class="form-select">
|
||||
<div class="select endpointSelect pull-left">
|
||||
<select id="src_registry_id" (change)="sourceChange($event)" formControlName="src_registry" [compareWith]="equals">
|
||||
<option class="display-none"></option>
|
||||
<option *ngFor="let source of sourceList" [ngValue]="source">{{source.name}}-{{source.url}}</option>
|
||||
</select>
|
||||
</div>
|
||||
@ -97,6 +98,7 @@
|
||||
<div class="form-select">
|
||||
<div class="select endpointSelect pull-left">
|
||||
<select id="dest_registry" (change)="targetChange($event)" formControlName="dest_registry" [compareWith]="equals">
|
||||
<option class="display-none"></option>
|
||||
<option *ngFor="let target of targetList" [ngValue]="target">{{target.name}}-{{target.url}}</option>
|
||||
</select>
|
||||
</div>
|
||||
|
@ -265,3 +265,7 @@ clr-modal {
|
||||
width: 20rem;
|
||||
}
|
||||
}
|
||||
|
||||
.display-none{
|
||||
display: none;
|
||||
}
|
@ -31,6 +31,9 @@ export const errorHandler = function (error: any): string {
|
||||
if (error.error && error.error.message) {
|
||||
return error.error.message;
|
||||
}
|
||||
if (error.message) {
|
||||
return error.message;
|
||||
}
|
||||
|
||||
if (!(error.statusCode || error.status)) {
|
||||
// treat as string message
|
||||
|
@ -104,7 +104,6 @@ export class AuditLogComponent implements OnInit {
|
||||
this.auditLogs = response.body;
|
||||
},
|
||||
error => {
|
||||
this.router.navigate(['/harbor', 'projects']);
|
||||
this.messageHandlerService.handleError(error);
|
||||
}
|
||||
);
|
||||
|
@ -129,15 +129,16 @@ export class MemberComponent implements OnInit, OnDestroy {
|
||||
this.selectedRow = [];
|
||||
this.memberService
|
||||
.listMembers(projectId, username).pipe(
|
||||
finalize(() => this.loading = false))
|
||||
finalize(() => {
|
||||
this.loading = false;
|
||||
let hnd = setInterval(() => this.ref.markForCheck(), 100);
|
||||
setTimeout(() => clearInterval(hnd), 1000);
|
||||
}))
|
||||
.subscribe(
|
||||
response => {
|
||||
this.members = response;
|
||||
let hnd = setInterval(() => this.ref.markForCheck(), 100);
|
||||
setTimeout(() => clearInterval(hnd), 1000);
|
||||
},
|
||||
error => {
|
||||
this.router.navigate(["/harbor", "projects"]);
|
||||
this.messageHandlerService.handleError(error);
|
||||
});
|
||||
}
|
||||
|
src/replication/adapter/awsecr/adapter.go (new file, 174 lines)
@@ -0,0 +1,174 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package awsecr
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
awsecrapi "github.com/aws/aws-sdk-go/service/ecr"
|
||||
"github.com/goharbor/harbor/src/common/utils/log"
|
||||
"github.com/goharbor/harbor/src/common/utils/registry"
|
||||
adp "github.com/goharbor/harbor/src/replication/adapter"
|
||||
"github.com/goharbor/harbor/src/replication/model"
|
||||
"net/http"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
func init() {
|
||||
if err := adp.RegisterFactory(model.RegistryTypeAwsEcr, func(registry *model.Registry) (adp.Adapter, error) {
|
||||
return newAdapter(registry)
|
||||
}); err != nil {
|
||||
log.Errorf("failed to register factory for %s: %v", model.RegistryTypeAwsEcr, err)
|
||||
return
|
||||
}
|
||||
log.Infof("the factory for adapter %s registered", model.RegistryTypeAwsEcr)
|
||||
}
|
||||
|
||||
func newAdapter(registry *model.Registry) (*adapter, error) {
|
||||
region, err := parseRegion(registry.URL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
authorizer := NewAuth(region, registry.Credential.AccessKey, registry.Credential.AccessSecret, registry.Insecure)
|
||||
reg, err := adp.NewDefaultImageRegistryWithCustomizedAuthorizer(registry, authorizer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &adapter{
|
||||
registry: registry,
|
||||
DefaultImageRegistry: reg,
|
||||
region: region,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func parseRegion(url string) (string, error) {
|
||||
pattern := "https://(?:api|\\d+\\.dkr)\\.ecr\\.([\\w\\-]+)\\.amazonaws\\.com"
|
||||
rs := regexp.MustCompile(pattern).FindStringSubmatch(url)
|
||||
if rs == nil {
|
||||
return "", errors.New("Bad aws url")
|
||||
}
|
||||
return rs[1], nil
|
||||
}
|
||||
|
||||
type adapter struct {
|
||||
*adp.DefaultImageRegistry
|
||||
registry *model.Registry
|
||||
region string
|
||||
forceEndpoint *string
|
||||
}
|
||||
|
||||
func (*adapter) Info() (info *model.RegistryInfo, err error) {
|
||||
return &model.RegistryInfo{
|
||||
Type: model.RegistryTypeAwsEcr,
|
||||
SupportedResourceTypes: []model.ResourceType{
|
||||
model.ResourceTypeImage,
|
||||
},
|
||||
SupportedResourceFilters: []*model.FilterStyle{
|
||||
{
|
||||
Type: model.FilterTypeName,
|
||||
Style: model.FilterStyleTypeText,
|
||||
},
|
||||
{
|
||||
Type: model.FilterTypeTag,
|
||||
Style: model.FilterStyleTypeText,
|
||||
},
|
||||
},
|
||||
SupportedTriggers: []model.TriggerType{
|
||||
model.TriggerTypeManual,
|
||||
model.TriggerTypeScheduled,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// HealthCheck checks health status of a registry
|
||||
func (a *adapter) HealthCheck() (model.HealthStatus, error) {
|
||||
if a.registry.Credential == nil ||
|
||||
len(a.registry.Credential.AccessKey) == 0 || len(a.registry.Credential.AccessSecret) == 0 {
|
||||
log.Errorf("no credential to ping registry %s", a.registry.URL)
|
||||
return model.Unhealthy, nil
|
||||
}
|
||||
if err := a.PingGet(); err != nil {
|
||||
log.Errorf("failed to ping registry %s: %v", a.registry.URL, err)
|
||||
return model.Unhealthy, nil
|
||||
}
|
||||
return model.Healthy, nil
|
||||
}
|
||||
|
||||
// PrepareForPush nothing need to do.
|
||||
func (a *adapter) PrepareForPush(resources []*model.Resource) error {
|
||||
for _, resource := range resources {
|
||||
if resource == nil {
|
||||
return errors.New("the resource cannot be nil")
|
||||
}
|
||||
if resource.Metadata == nil {
|
||||
return errors.New("the metadata of resource cannot be nil")
|
||||
}
|
||||
if resource.Metadata.Repository == nil {
|
||||
return errors.New("the namespace of resource cannot be nil")
|
||||
}
|
||||
if len(resource.Metadata.Repository.Name) == 0 {
|
||||
return errors.New("the name of the namespace cannot be nil")
|
||||
}
|
||||
|
||||
err := a.createRepository(resource.Metadata.Repository.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *adapter) createRepository(repository string) error {
|
||||
if a.registry.Credential == nil ||
|
||||
len(a.registry.Credential.AccessKey) == 0 || len(a.registry.Credential.AccessSecret) == 0 {
|
||||
return errors.New("no credential ")
|
||||
}
|
||||
cred := credentials.NewStaticCredentials(
|
||||
a.registry.Credential.AccessKey,
|
||||
a.registry.Credential.AccessSecret,
|
||||
"")
|
||||
if a.region == "" {
|
||||
return errors.New("no region parsed")
|
||||
}
|
||||
config := &aws.Config{
|
||||
Credentials: cred,
|
||||
Region: &a.region,
|
||||
HTTPClient: &http.Client{
|
||||
Transport: registry.GetHTTPTransport(a.registry.Insecure),
|
||||
},
|
||||
}
|
||||
if a.forceEndpoint != nil {
|
||||
config.Endpoint = a.forceEndpoint
|
||||
}
|
||||
sess := session.Must(session.NewSession(config))
|
||||
|
||||
svc := awsecrapi.New(sess)
|
||||
|
||||
_, err := svc.CreateRepository(&awsecrapi.CreateRepositoryInput{
|
||||
RepositoryName: &repository,
|
||||
})
|
||||
if err != nil {
|
||||
if e, ok := err.(awserr.Error); ok {
|
||||
if e.Code() == awsecrapi.ErrCodeRepositoryAlreadyExistsException {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
src/replication/adapter/awsecr/adapter_test.go (new file, 252 lines)
@@ -0,0 +1,252 @@
|
||||
package awsecr
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/goharbor/harbor/src/common/utils/test"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
adp "github.com/goharbor/harbor/src/replication/adapter"
|
||||
"github.com/goharbor/harbor/src/replication/model"
|
||||
)
|
||||
|
||||
func TestAdapter_NewAdapter(t *testing.T) {
|
||||
factory, err := adp.GetFactory("BadName")
|
||||
assert.Nil(t, factory)
|
||||
assert.NotNil(t, err)
|
||||
|
||||
factory, err = adp.GetFactory(model.RegistryTypeAwsEcr)
|
||||
assert.Nil(t, err)
|
||||
assert.NotNil(t, factory)
|
||||
|
||||
adapter, err := factory(&model.Registry{
|
||||
Type: model.RegistryTypeAwsEcr,
|
||||
Credential: &model.Credential{
|
||||
AccessKey: "xxx",
|
||||
AccessSecret: "ppp",
|
||||
},
|
||||
URL: "https://api.ecr.test-region.amazonaws.com",
|
||||
})
|
||||
assert.Nil(t, err)
|
||||
assert.NotNil(t, adapter)
|
||||
|
||||
adapter, err = factory(&model.Registry{
|
||||
Type: model.RegistryTypeAwsEcr,
|
||||
Credential: &model.Credential{
|
||||
AccessKey: "xxx",
|
||||
AccessSecret: "ppp",
|
||||
},
|
||||
URL: "https://123456.dkr.ecr.test-region.amazonaws.com",
|
||||
})
|
||||
assert.Nil(t, err)
|
||||
assert.NotNil(t, adapter)
|
||||
|
||||
adapter, err = factory(&model.Registry{
|
||||
Type: model.RegistryTypeAwsEcr,
|
||||
Credential: &model.Credential{
|
||||
AccessKey: "xxx",
|
||||
AccessSecret: "ppp",
|
||||
},
|
||||
})
|
||||
assert.Nil(t, adapter)
|
||||
assert.NotNil(t, err)
|
||||
|
||||
}
|
||||
|
||||
func getMockAdapter(t *testing.T, hasCred, health bool) (*adapter, *httptest.Server) {
|
||||
server := test.NewServer(
|
||||
&test.RequestHandlerMapping{
|
||||
Method: http.MethodGet,
|
||||
Pattern: "/v2/_catalog",
|
||||
Handler: func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte(`
|
||||
{
|
||||
"repositories": [
|
||||
"test1"
|
||||
]
|
||||
}`))
|
||||
},
|
||||
},
|
||||
&test.RequestHandlerMapping{
|
||||
Method: http.MethodGet,
|
||||
Pattern: "/v2/{repo}/tags/list",
|
||||
Handler: func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte(`
|
||||
{
|
||||
"name": "test1",
|
||||
"tags": [
|
||||
"latest"
|
||||
]
|
||||
}`))
|
||||
},
|
||||
},
|
||||
&test.RequestHandlerMapping{
|
||||
Method: http.MethodGet,
|
||||
Pattern: "/v2/",
|
||||
Handler: func(w http.ResponseWriter, r *http.Request) {
|
||||
fmt.Println(r.Method, r.URL)
|
||||
if health {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
} else {
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
}
|
||||
},
|
||||
},
|
||||
&test.RequestHandlerMapping{
|
||||
Method: http.MethodGet,
|
||||
Pattern: "/",
|
||||
Handler: func(w http.ResponseWriter, r *http.Request) {
|
||||
fmt.Println(r.Method, r.URL)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
},
|
||||
},
|
||||
&test.RequestHandlerMapping{
|
||||
Method: http.MethodPost,
|
||||
Pattern: "/",
|
||||
Handler: func(w http.ResponseWriter, r *http.Request) {
|
||||
fmt.Println(r.Method, r.URL)
|
||||
if buf, e := ioutil.ReadAll(&io.LimitedReader{R: r.Body, N: 80}); e == nil {
|
||||
fmt.Println("\t", string(buf))
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
registry := &model.Registry{
|
||||
Type: model.RegistryTypeAwsEcr,
|
||||
URL: server.URL,
|
||||
}
|
||||
if hasCred {
|
||||
registry.Credential = &model.Credential{
|
||||
AccessKey: "xxx",
|
||||
AccessSecret: "ppp",
|
||||
}
|
||||
}
|
||||
reg, err := adp.NewDefaultImageRegistry(registry)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return &adapter{
|
||||
registry: registry,
|
||||
DefaultImageRegistry: reg,
|
||||
region: "test-region",
|
||||
forceEndpoint: &server.URL,
|
||||
}, server
|
||||
}
|
||||
|
||||
func TestAdapter_Info(t *testing.T) {
|
||||
a, s := getMockAdapter(t, true, true)
|
||||
defer s.Close()
|
||||
info, err := a.Info()
|
||||
assert.Nil(t, err)
|
||||
assert.NotNil(t, info)
|
||||
|
||||
assert.EqualValues(t, 1, len(info.SupportedResourceTypes))
|
||||
assert.EqualValues(t, model.ResourceTypeImage, info.SupportedResourceTypes[0])
|
||||
}
|
||||
|
||||
func TestAdapter_HealthCheck(t *testing.T) {
|
||||
a, s := getMockAdapter(t, false, true)
|
||||
defer s.Close()
|
||||
status, err := a.HealthCheck()
|
||||
assert.Nil(t, err)
|
||||
assert.NotNil(t, status)
|
||||
assert.EqualValues(t, model.Unhealthy, status)
|
||||
|
||||
a, s = getMockAdapter(t, true, false)
|
||||
defer s.Close()
|
||||
status, err = a.HealthCheck()
|
||||
assert.Nil(t, err)
|
||||
assert.NotNil(t, status)
|
||||
assert.EqualValues(t, model.Unhealthy, status)
|
||||
|
||||
a, s = getMockAdapter(t, true, true)
|
||||
defer s.Close()
|
||||
status, err = a.HealthCheck()
|
||||
assert.Nil(t, err)
|
||||
assert.NotNil(t, status)
|
||||
assert.EqualValues(t, model.Healthy, status)
|
||||
}
|
||||
|
||||
func TestAdapter_PrepareForPush(t *testing.T) {
|
||||
a, s := getMockAdapter(t, true, true)
|
||||
defer s.Close()
|
||||
resources := []*model.Resource{
|
||||
{
|
||||
Type: model.ResourceTypeImage,
|
||||
Metadata: &model.ResourceMetadata{
|
||||
Repository: &model.Repository{
|
||||
Name: "busybox",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := a.PrepareForPush(resources)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
func TestAdapter_FetchImages(t *testing.T) {
|
||||
a, s := getMockAdapter(t, true, true)
|
||||
defer s.Close()
|
||||
resources, err := a.FetchImages([]*model.Filter{
|
||||
{
|
||||
Type: model.FilterTypeName,
|
||||
Value: "*",
|
||||
},
|
||||
{
|
||||
Type: model.FilterTypeTag,
|
||||
Value: "*",
|
||||
},
|
||||
})
|
||||
assert.Nil(t, err)
|
||||
assert.NotNil(t, resources)
|
||||
assert.Equal(t, 1, len(resources))
|
||||
}
|
||||
|
||||
func TestAwsAuthCredential_Modify(t *testing.T) {
|
||||
et := time.Now().Add(time.Second).Unix()
|
||||
server := test.NewServer(
|
||||
&test.RequestHandlerMapping{
|
||||
Method: http.MethodPost,
|
||||
Pattern: "/",
|
||||
Handler: func(w http.ResponseWriter, r *http.Request) {
|
||||
fmt.Println(r.Method, r.URL)
|
||||
if buf, e := ioutil.ReadAll(&io.LimitedReader{R: r.Body, N: 80}); e == nil {
|
||||
fmt.Println("\t", string(buf))
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte(fmt.Sprintf(`
|
||||
{
|
||||
"authorizationData" : [
|
||||
{
|
||||
"expiresAt" : %d,
|
||||
"proxyEndpoint" : "https://12345.dkr.ecr.ap-northeast-1.amazonaws.com",
|
||||
"authorizationToken" : "QVdTOmV5SndZWGxzYjJGa0lqb2llRlJNTkdSbWMyZE5RM0pXYWtoVWRrdzRNVVJIT1d0NFNXRlJjVEpET0c5cVZVUlFWRUkxUVVoS1ZDOVJjbXBIV0d4RlN6ZFZlR0UxTnk5TVdVSXdSU3RyVlRBNVRrSnVXbmhoUVdKaFlVZzFOV3d3YzJ4RVNIcHdZVWRZWTA4dmVGbHFjakphV1VKaE1YUlVkMU5JV2xWU1UxbzNSaTlKTHpaMFlXaFVPV1pXTldoelRXcFZiQ3R1SzBndlptdEtWMmMxYW5wclJrTXpkRXgzWkd4MFdWaE1PREZzV1dGWGQzVjJkbG94YkZKbFVrRnBhbVZYU1cxRksyRk9WM3AzVm1jM1N6aFNTMmhvYzBkRlFXNXRRbEJ4WkRGTVNuRkpjR2hUTldaNmJrazFNWEpPWmtwNU1WUnRMMVZNVFZZMVNYVkJjV1ZHYzA5MFUycEhkRTlFWVhsdGNrVlFXamhYUTBkR05YRklTWFE0UmtSTWNGQllPWFZoYW14NmNrbENkamROVkRsVk1UWlpkVlJpZDFaSWJYRjZPRGQ0VDNKdGVIaFRSR0Z2TDNCVU5qUXhja2w1YkhwUFJHUmpUMEpWWVdGUmRsWnpjak5TUzFaaFpEUmhaVkJ3ZUNzMGVYa3dhR2ROZW5sd1RVWkdRMkV3ZEVveU5HeEVNVVpUTWtkVWFXRlhMemMyVlRoTE9WWndNMEZ6WjFWaU4zbHZZbmhaY2tNNGFqVTRiMlJ4WlVWV01GbEtMekJTWWxSU1FYTjJiM1JEV1VzcllrSlJMM3BOUldjelN6UnpNa0lySzBGclIyUTJNM2MyZEd0VUx6VnBVWFZYY1UxdGJXcGxZMVF2TlRGc04wRm9UMFJzYlRKME5rbzRUamhvTXpkbWJVOWlMMkpXWTA1a09GWTVTM0ptWmtGQk1HTllSVTE1UWk5T1RIcHlaMEpwWWtOUmFFdHNiSEJTYlU5YVNHOHpWemxpV1haTmMzcHJVM0Z0YkU5clpUQmxjbU5GVTAxS1oycHRNV1Z5TDBKMlJHbGxVVEJDTWpSWVpHRjNLMDlGWkUxeVExTlRORll2ZDFFM1dWUXJRVFY0SzJScWNHWmhiREI1UkN0YVltOXdUelF3YlZBNFpXSlpLMUkxYld4VGRDOU9NblpxVDA1clMxbE9aemh3WVUxbVVFVjVja3BXT0ZSME4zUlVPR2MzV0ZaS1RVOVJOSEpqUlVaV09HbHBSVE5LVFRGc1RqRXljSGxOVFVGbk5sbGtNM3A0UW1OWFZrWkhRM1ZuVEhZNU1DOVhRVVJtTlV4TWFHbHFXRTh6YzNFd1dVaGtWRUkzUjBObVdtaGlla3huWjJsT2FVRXZXa2tyU1hSWlZUQnFUR2xxVGxoV1ZEWmFiRGcwZVdzM2IzWTVOVnBhZVUxUVFteHdjVkJLYkVsbU0yWkdWamc0UXpJdmVtTnNSeXQxYW1kb1VYVkxkM0U1ZEROdlVGZ3piaXREUm5oMVFqTnVTREZDVURGVlVVbEtVbGx5UlRaRUt6TkJPRWM1UVVabVVIRkNkMVZUWmpCSE5qbHlhM056YlhKdU1XMTVUa3RWZFZCemRETkplREpaY214ckswZFBOazB4ZG5GU2JsSXJUVTFUWmxSMmFtRlFOMXBEVW5CQmVFWTJZeXN5VlZKWVJrdElObEkyVDNCcGJFSktRV3N4UkhBMGRFNVBiVzAyYzJsalRFWjBjek0zTm1OUGNWWTRUMjkwVldZeGVrRjVZVGgxTlc5VWRGUkRUemcxZVVKVWFXNXNkMVZ1WmpZcmNtOHljVXRoVUZGWFdVZzNhamhWWkVaS05EUXdMMHRzVEdwNVlXSmlia1ZJTjNsRVpGRnhXRnBQTkVNeFptRlNZeTgyYUVwdEsxQXZXSEJETXpaSE0zTk9iWGgySzJKQlJHUXlUakZVYm1JMVJFZElZVVJTY2tsYU1uWkNiMHBRUW5GYVUwbGhRazV4YkZWUldWQjNUVEpEVjJzdlVVRTRlRVJDTjNsRlIwTnFSWFJuUFQwaUxDSmtZWFJoYTJWNUlqb2lRVkZGUWtGSVowRk5aa3RFYkVsdmNFTTJlbk13WWsxa1VuSlpVMGhoTDBNek9XdERjbU5RT0d0V2NISkZPV1lyYTFGQlFVRklOSGRtUVZsS1MyOWFTV2gyWTA1QlVXTkhiMGM0ZDJKUlNVSkJSRUp2UW1kcmNXaHJhVWM1ZHpCQ1FuZEZkMGhuV1VwWlNWcEpRVmRWUkVKQlJYVk5Ra1ZGUkVVNVpESnBPVVJNVUZrek5Ga3JTMmRCU1VKRlNVRTNNVnA2T1c1eVQzSjNVMnBhUW1Wc1YyOTBNRUpwY0VwbVoyTkhhbU5FU3k5WVEwY3JNSGxvTDFFNVpuZ3pVemc0WjFVMFQxQkVabVpVV1d4UFRUQTVPSGhvUjJWWmJscEZRV3hyZUN0ek1EMGlMQ0oyWlhKemFXOXVJam9pTWlJc0luUjVjR1VpT2lKRVFWUkJYMHRGV1NJc0ltVjRjR2x5WVhScGIyNGlPakUxTmpFME9EVXpNemg5"
|
||||
}
|
||||
]
|
||||
}
|
||||
`, et)))
|
||||
},
|
||||
},
|
||||
)
|
||||
defer server.Close()
|
||||
a, _ := NewAuth("test-region", "xxx", "ppp", true).(*awsAuthCredential)
|
||||
a.forceEndpoint = &server.URL
|
||||
req := httptest.NewRequest(http.MethodGet, "https://1234.dkr.ecr.test-region.amazonaws.com/v2/", nil)
|
||||
err := a.Modify(req)
|
||||
assert.Nil(t, err)
|
||||
err = a.Modify(req)
|
||||
assert.Nil(t, err)
|
||||
time.Sleep(time.Second)
|
||||
err = a.Modify(req)
|
||||
assert.Nil(t, err)
|
||||
}
|
src/replication/adapter/awsecr/auth.go (new file, 163 lines)
@@ -0,0 +1,163 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package awsecr
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
awsecrapi "github.com/aws/aws-sdk-go/service/ecr"
|
||||
"github.com/goharbor/harbor/src/common/http/modifier"
|
||||
"github.com/goharbor/harbor/src/common/utils/log"
|
||||
"github.com/goharbor/harbor/src/common/utils/registry"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Credential ...
|
||||
type Credential modifier.Modifier
|
||||
|
||||
// Implements interface Credential
|
||||
type awsAuthCredential struct {
|
||||
region string
|
||||
accessKey string
|
||||
accessSecret string
|
||||
insecure bool
|
||||
forceEndpoint *string
|
||||
|
||||
cacheToken *cacheToken
|
||||
cacheExpired *time.Time
|
||||
}
|
||||
|
||||
type cacheToken struct {
|
||||
endpoint string
|
||||
user string
|
||||
password string
|
||||
host string
|
||||
}
|
||||
|
||||
// DefaultCacheExpiredTime is expired timeout for aws auth token
|
||||
const DefaultCacheExpiredTime = time.Hour * 1
|
||||
|
||||
func (a *awsAuthCredential) Modify(req *http.Request) error {
|
||||
// url maybe redirect to s3
|
||||
if !strings.Contains(req.URL.Host, ".ecr.") {
|
||||
return nil
|
||||
}
|
||||
if !a.isTokenValid() {
|
||||
endpoint, user, pass, expiresAt, err := a.getAuthorization()
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
u, err := url.Parse(endpoint)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
a.cacheToken = &cacheToken{}
|
||||
a.cacheToken.host = u.Host
|
||||
a.cacheToken.user = user
|
||||
a.cacheToken.password = pass
|
||||
a.cacheToken.endpoint = endpoint
|
||||
t := time.Now().Add(DefaultCacheExpiredTime)
|
||||
if t.Before(*expiresAt) {
|
||||
a.cacheExpired = &t
|
||||
} else {
|
||||
a.cacheExpired = expiresAt
|
||||
}
|
||||
}
|
||||
req.Host = a.cacheToken.host
|
||||
req.URL.Host = a.cacheToken.host
|
||||
req.SetBasicAuth(a.cacheToken.user, a.cacheToken.password)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *awsAuthCredential) getAuthorization() (string, string, string, *time.Time, error) {
|
||||
log.Infof("Aws Ecr getAuthorization %s", a.accessKey)
|
||||
cred := credentials.NewStaticCredentials(
|
||||
a.accessKey,
|
||||
a.accessSecret,
|
||||
"")
|
||||
config := &aws.Config{
|
||||
Credentials: cred,
|
||||
Region: &a.region,
|
||||
HTTPClient: &http.Client{
|
||||
Transport: registry.GetHTTPTransport(a.insecure),
|
||||
},
|
||||
}
|
||||
if a.forceEndpoint != nil {
|
||||
config.Endpoint = a.forceEndpoint
|
||||
}
|
||||
sess, err := session.NewSession(config)
|
||||
if err != nil {
|
||||
return "", "", "", nil, err
|
||||
}
|
||||
|
||||
svc := awsecrapi.New(sess)
|
||||
|
||||
result, err := svc.GetAuthorizationToken(nil)
|
||||
if err != nil {
|
||||
if aerr, ok := err.(awserr.Error); ok {
|
||||
return "", "", "", nil, fmt.Errorf("%s: %s", aerr.Code(), aerr.Error())
|
||||
}
|
||||
|
||||
return "", "", "", nil, err
|
||||
}
|
||||
|
||||
// Double check
|
||||
if len(result.AuthorizationData) == 0 {
|
||||
return "", "", "", nil, errors.New("no authorization token returned")
|
||||
}
|
||||
|
||||
theOne := result.AuthorizationData[0]
|
||||
expiresAt := theOne.ExpiresAt
|
||||
payload, _ := base64.StdEncoding.DecodeString(*theOne.AuthorizationToken)
|
||||
pair := strings.SplitN(string(payload), ":", 2)
|
||||
|
||||
log.Debugf("Aws Ecr getAuthorization %s result: %d %s...", a.accessKey, len(pair[1]), pair[1][:25])
|
||||
|
||||
return *(theOne.ProxyEndpoint), pair[0], pair[1], expiresAt, nil
|
||||
}
|
||||
|
||||
func (a *awsAuthCredential) isTokenValid() bool {
|
||||
if a.cacheToken == nil {
|
||||
return false
|
||||
}
|
||||
if a.cacheExpired == nil {
|
||||
return false
|
||||
}
|
||||
if time.Now().After(*a.cacheExpired) {
|
||||
a.cacheExpired = nil
|
||||
a.cacheToken = nil
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// NewAuth new aws auth
|
||||
func NewAuth(region, accessKey, accessSecret string, insecure bool) Credential {
|
||||
return &awsAuthCredential{
|
||||
region: region,
|
||||
accessKey: accessKey,
|
||||
accessSecret: accessSecret,
|
||||
insecure: insecure,
|
||||
}
|
||||
}
|
src/replication/adapter/awsecr/image_registry.go (new file, 113 lines)
@@ -0,0 +1,113 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package awsecr
|
||||
|
||||
import (
|
||||
adp "github.com/goharbor/harbor/src/replication/adapter"
|
||||
"github.com/goharbor/harbor/src/replication/model"
|
||||
"github.com/goharbor/harbor/src/replication/util"
|
||||
)
|
||||
|
||||
var _ adp.ImageRegistry = adapter{}
|
||||
|
||||
func (a adapter) FetchImages(filters []*model.Filter) ([]*model.Resource, error) {
|
||||
nameFilterPattern := ""
|
||||
tagFilterPattern := ""
|
||||
for _, filter := range filters {
|
||||
switch filter.Type {
|
||||
case model.FilterTypeName:
|
||||
nameFilterPattern = filter.Value.(string)
|
||||
case model.FilterTypeTag:
|
||||
tagFilterPattern = filter.Value.(string)
|
||||
}
|
||||
}
|
||||
repositories, err := a.filterRepositories(nameFilterPattern)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var resources []*model.Resource
|
||||
for _, repository := range repositories {
|
||||
tags, err := a.filterTags(repository, tagFilterPattern)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(tags) == 0 {
|
||||
continue
|
||||
}
|
||||
resources = append(resources, &model.Resource{
|
||||
Type: model.ResourceTypeImage,
|
||||
Registry: a.registry,
|
||||
Metadata: &model.ResourceMetadata{
|
||||
Repository: &model.Repository{
|
||||
Name: repository,
|
||||
},
|
||||
Vtags: tags,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
return resources, nil
|
||||
}
|
||||
|
||||
func (a adapter) filterRepositories(pattern string) ([]string, error) {
|
||||
// if the pattern is a specific repository name, just returns the parsed repositories
|
||||
// and will check the existence later when filtering the tags
|
||||
if repositories, ok := util.IsSpecificPath(pattern); ok {
|
||||
return repositories, nil
|
||||
}
|
||||
// search repositories from catalog api
|
||||
repositories, err := a.Catalog()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// if the pattern is null, just return the result of catalog API
|
||||
if len(pattern) == 0 {
|
||||
return repositories, nil
|
||||
}
|
||||
result := []string{}
|
||||
for _, repository := range repositories {
|
||||
match, err := util.Match(pattern, repository)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if match {
|
||||
result = append(result, repository)
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (a adapter) filterTags(repository, pattern string) ([]string, error) {
|
||||
tags, err := a.ListTag(repository)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(pattern) == 0 {
|
||||
return tags, nil
|
||||
}
|
||||
|
||||
var result []string
|
||||
for _, tag := range tags {
|
||||
match, err := util.Match(pattern, tag)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if match {
|
||||
result = append(result, tag)
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
@ -97,11 +97,6 @@ func (a *adapter) Info() (*model.RegistryInfo, error) {
|
||||
Type: model.FilterTypeTag,
|
||||
Style: model.FilterStyleTypeText,
|
||||
},
|
||||
// TODO add support for label filter
|
||||
// {
|
||||
// Type: model.FilterTypeLabel,
|
||||
// Style: model.FilterStyleTypeText,
|
||||
// },
|
||||
},
|
||||
SupportedTriggers: []model.TriggerType{
|
||||
model.TriggerTypeManual,
|
||||
@ -118,6 +113,26 @@ func (a *adapter) Info() (*model.RegistryInfo, error) {
|
||||
if sys.ChartRegistryEnabled {
|
||||
info.SupportedResourceTypes = append(info.SupportedResourceTypes, model.ResourceTypeChart)
|
||||
}
|
||||
labels := []*struct {
|
||||
Name string `json:"name"`
|
||||
}{}
|
||||
// label isn't supported in some previous version of Harbor
|
||||
if err := a.client.Get(a.getURL()+"/api/labels?scope=g", &labels); err != nil {
|
||||
if e, ok := err.(*common_http.Error); !ok || e.Code != http.StatusNotFound {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
ls := []string{}
|
||||
for _, label := range labels {
|
||||
ls = append(ls, label.Name)
|
||||
}
|
||||
labelFilter := &model.FilterStyle{
|
||||
Type: model.FilterTypeLabel,
|
||||
Style: model.FilterStyleTypeList,
|
||||
Values: ls,
|
||||
}
|
||||
info.SupportedResourceFilters = append(info.SupportedResourceFilters, labelFilter)
|
||||
}
|
||||
return info, nil
|
||||
}
|
||||
|
||||
@ -244,12 +259,15 @@ func (a *adapter) getProject(name string) (*project, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (a *adapter) getRepositories(projectID int64) ([]*repository, error) {
|
||||
repositories := []*repository{}
|
||||
func (a *adapter) getRepositories(projectID int64) ([]*adp.Repository, error) {
|
||||
repositories := []*adp.Repository{}
|
||||
url := fmt.Sprintf("%s/api/repositories?project_id=%d&page=1&page_size=500", a.getURL(), projectID)
|
||||
if err := a.client.GetAndIteratePagination(url, &repositories); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, repository := range repositories {
|
||||
repository.ResourceType = string(model.ResourceTypeImage)
|
||||
}
|
||||
return repositories, nil
|
||||
}
|
||||
|
||||
|
@@ -24,45 +24,17 @@ import (
	"strings"

	common_http "github.com/goharbor/harbor/src/common/http"
	adp "github.com/goharbor/harbor/src/replication/adapter"
	"github.com/goharbor/harbor/src/replication/model"
)

type chart struct {
type label struct {
	Name    string `json:"name"`
	Project string
}

func (c *chart) Match(filters []*model.Filter) (bool, error) {
	supportedFilters := []*model.Filter{}
	for _, filter := range filters {
		if filter.Type == model.FilterTypeName {
			supportedFilters = append(supportedFilters, filter)
		}
	}
	item := &FilterItem{
		Value: fmt.Sprintf("%s/%s", c.Project, c.Name),
	}
	return item.Match(supportedFilters)
}

type chartVersion struct {
	Name    string `json:"name"`
	Version string `json:"version"`
	// TODO handle system/project level labels
	// Labels string `json:"labels"`
}

func (c *chartVersion) Match(filters []*model.Filter) (bool, error) {
	supportedFilters := []*model.Filter{}
	for _, filter := range filters {
		if filter.Type == model.FilterTypeTag {
			supportedFilters = append(supportedFilters, filter)
		}
	}
	item := &FilterItem{
		Value: c.Version,
	}
	return item.Match(supportedFilters)
	Labels []*label `json:"labels"`
}

type chartVersionDetail struct {

@@ -81,37 +53,60 @@ func (a *adapter) FetchCharts(filters []*model.Filter) ([]*model.Resource, error
	resources := []*model.Resource{}
	for _, project := range projects {
		url := fmt.Sprintf("%s/api/chartrepo/%s/charts", a.getURL(), project.Name)
		charts := []*chart{}
		if err := a.client.Get(url, &charts); err != nil {
		repositories := []*adp.Repository{}
		if err := a.client.Get(url, &repositories); err != nil {
			return nil, err
		}
		for _, chart := range charts {
			chart.Project = project.Name
		if len(repositories) == 0 {
			continue
		}
		charts, err := filterCharts(charts, filters)
		if err != nil {
		for _, repository := range repositories {
			repository.Name = fmt.Sprintf("%s/%s", project.Name, repository.Name)
			repository.ResourceType = string(model.ResourceTypeChart)
		}
		for _, filter := range filters {
			if err = filter.DoFilter(&repositories); err != nil {
				return nil, err
			}
		for _, chart := range charts {
			url := fmt.Sprintf("%s/api/chartrepo/%s/charts/%s", a.getURL(), project.Name, chart.Name)
			chartVersions := []*chartVersion{}
			if err := a.client.Get(url, &chartVersions); err != nil {
		}
		for _, repository := range repositories {
			name := strings.SplitN(repository.Name, "/", 2)[1]
			url := fmt.Sprintf("%s/api/chartrepo/%s/charts/%s", a.getURL(), project.Name, name)
			versions := []*chartVersion{}
			if err := a.client.Get(url, &versions); err != nil {
				return nil, err
			}
			chartVersions, err = filterChartVersions(chartVersions, filters)
			if err != nil {
			if len(versions) == 0 {
				continue
			}
			vTags := []*adp.VTag{}
			for _, version := range versions {
				var labels []string
				for _, label := range version.Labels {
					labels = append(labels, label.Name)
				}
				vTags = append(vTags, &adp.VTag{
					Name:         version.Version,
					Labels:       labels,
					ResourceType: string(model.ResourceTypeChart),
				})
			}
			for _, filter := range filters {
				if err = filter.DoFilter(&vTags); err != nil {
					return nil, err
				}
			for _, version := range chartVersions {
			}

			for _, vTag := range vTags {
				resources = append(resources, &model.Resource{
					Type:     model.ResourceTypeChart,
					Registry: a.registry,
					Metadata: &model.ResourceMetadata{
						Repository: &model.Repository{
							Name:     fmt.Sprintf("%s/%s", project.Name, chart.Name),
							Name:     repository.Name,
							Metadata: project.Metadata,
						},
						Vtags: []string{version.Version},
						Vtags: []string{vTag.Name},
					},
				})
			}

@@ -232,31 +227,3 @@ func parseChartName(name string) (string, string, error) {
	}
	return "", "", fmt.Errorf("invalid chart name format: %s", name)
}

func filterCharts(charts []*chart, filters []*model.Filter) ([]*chart, error) {
	result := []*chart{}
	for _, chart := range charts {
		match, err := chart.Match(filters)
		if err != nil {
			return nil, err
		}
		if match {
			result = append(result, chart)
		}
	}
	return result, nil
}

func filterChartVersions(chartVersions []*chartVersion, filters []*model.Filter) ([]*chartVersion, error) {
	result := []*chartVersion{}
	for _, chartVersion := range chartVersions {
		match, err := chartVersion.Match(filters)
		if err != nil {
			return nil, err
		}
		if match {
			result = append(result, chartVersion)
		}
	}
	return result, nil
}
@@ -1,59 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package harbor

import (
	"fmt"

	"github.com/goharbor/harbor/src/replication/model"
	"github.com/goharbor/harbor/src/replication/util"
)

// TODO unify the filter logic from different adapters into one?
// and move the code into a separated common package

// Filterable defines the interface that an object should implement
// if the object can be filtered
type Filterable interface {
	Match([]*model.Filter) (bool, error)
}

// FilterItem is a filterable object that can be used to match string pattern
type FilterItem struct {
	Value string
}

// Match ...
func (f *FilterItem) Match(filters []*model.Filter) (bool, error) {
	if len(filters) == 0 {
		return true, nil
	}
	matched := true
	for _, filter := range filters {
		pattern, ok := filter.Value.(string)
		if !ok {
			return false, fmt.Errorf("the type of filter value isn't string: %v", filter)
		}
		m, err := util.Match(pattern, f.Value)
		if err != nil {
			return false, err
		}
		if !m {
			matched = false
			break
		}
	}
	return matched, nil
}
@@ -1,86 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package harbor

import (
	"testing"

	"github.com/goharbor/harbor/src/replication/model"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestMatch(t *testing.T) {
	// nil filters
	item := &FilterItem{}
	match, err := item.Match(nil)
	require.Nil(t, err)
	assert.True(t, match)
	// contains filter whose value isn't string
	item = &FilterItem{}
	filters := []*model.Filter{
		{
			Type:  "test",
			Value: 1,
		},
	}
	match, err = item.Match(filters)
	require.NotNil(t, err)
	// both filters match
	item = &FilterItem{
		Value: "b/c",
	}
	filters = []*model.Filter{
		{
			Value: "b/*",
		},
		{
			Value: "*/c",
		},
	}
	match, err = item.Match(filters)
	require.Nil(t, err)
	assert.True(t, match)
	// one filter matches and the other one doesn't
	item = &FilterItem{
		Value: "b/c",
	}
	filters = []*model.Filter{
		{
			Value: "b/*",
		},
		{
			Value: "d",
		},
	}
	match, err = item.Match(filters)
	require.Nil(t, err)
	assert.False(t, match)
	// both filters don't match
	item = &FilterItem{
		Value: "b/c",
	}
	filters = []*model.Filter{
		{
			Value: "f",
		},
		{
			Value: "d",
		},
	}
	match, err = item.Match(filters)
	require.Nil(t, err)
	assert.False(t, match)
}
@@ -19,44 +19,11 @@ import (
	"strings"

	"github.com/goharbor/harbor/src/common/utils/log"
	adp "github.com/goharbor/harbor/src/replication/adapter"
	"github.com/goharbor/harbor/src/replication/model"
	"github.com/goharbor/harbor/src/replication/util"
)

type repository struct {
	Name string `json:"name"`
}

func (r *repository) Match(filters []*model.Filter) (bool, error) {
	supportedFilters := []*model.Filter{}
	for _, filter := range filters {
		if filter.Type == model.FilterTypeName {
			supportedFilters = append(supportedFilters, filter)
		}
	}
	item := &FilterItem{
		Value: r.Name,
	}
	return item.Match(supportedFilters)
}

type tag struct {
	Name string `json:"name"`
}

func (t *tag) Match(filters []*model.Filter) (bool, error) {
	supportedFilters := []*model.Filter{}
	for _, filter := range filters {
		if filter.Type == model.FilterTypeTag {
			supportedFilters = append(supportedFilters, filter)
		}
	}
	item := &FilterItem{
		Value: t.Name,
	}
	return item.Match(supportedFilters)
}

func (a *adapter) FetchImages(filters []*model.Filter) ([]*model.Resource, error) {
	projects, err := a.listCandidateProjects(filters)
	if err != nil {

@@ -68,26 +35,33 @@ func (a *adapter) FetchImages(filters []*model.Filter) ([]*model.Resource, error
		if err != nil {
			return nil, err
		}
		repositories, err = filterRepositories(repositories, filters)
		if err != nil {
			return nil, err
		}
		for _, repository := range repositories {
			url := fmt.Sprintf("%s/api/repositories/%s/tags", a.getURL(), repository.Name)
			tags := []*tag{}
			if err = a.client.Get(url, &tags); err != nil {
				return nil, err
			}
			tags, err = filterTags(tags, filters)
			if err != nil {
				return nil, err
			}
			if len(tags) == 0 {
		if len(repositories) == 0 {
			continue
		}
			vtags := []string{}
			for _, tag := range tags {
				vtags = append(vtags, tag.Name)
		for _, filter := range filters {
			if err = filter.DoFilter(&repositories); err != nil {
				return nil, err
			}
		}
		for _, repository := range repositories {
			vTags, err := a.getTags(repository.Name)
			if err != nil {
				return nil, err
			}
			if len(vTags) == 0 {
				continue
			}
			for _, filter := range filters {
				if err = filter.DoFilter(&vTags); err != nil {
					return nil, err
				}
			}
			if len(vTags) == 0 {
				continue
			}
			tags := []string{}
			for _, vTag := range vTags {
				tags = append(tags, vTag.Name)
			}
			resources = append(resources, &model.Resource{
				Type: model.ResourceTypeImage,

@@ -97,7 +71,7 @@ func (a *adapter) FetchImages(filters []*model.Filter) ([]*model.Resource, error
					Name:     repository.Name,
					Metadata: project.Metadata,
				},
				Vtags: vtags,
				Vtags: tags,
			},
		})
	}

@@ -150,30 +124,28 @@ func (a *adapter) DeleteManifest(repository, reference string) error {
	return a.client.Delete(url)
}

func filterRepositories(repositories []*repository, filters []*model.Filter) ([]*repository, error) {
	result := []*repository{}
	for _, repository := range repositories {
		match, err := repository.Match(filters)
		if err != nil {
func (a *adapter) getTags(repository string) ([]*adp.VTag, error) {
	url := fmt.Sprintf("%s/api/repositories/%s/tags", a.getURL(), repository)
	tags := []*struct {
		Name   string `json:"name"`
		Labels []*struct {
			Name string `json:"name"`
		}
	}{}
	if err := a.client.Get(url, &tags); err != nil {
		return nil, err
	}
		if match {
			result = append(result, repository)
		}
	}
	return result, nil
}

func filterTags(tags []*tag, filters []*model.Filter) ([]*tag, error) {
	result := []*tag{}
	vTags := []*adp.VTag{}
	for _, tag := range tags {
		match, err := tag.Match(filters)
		if err != nil {
			return nil, err
		var labels []string
		for _, label := range tag.Labels {
			labels = append(labels, label.Name)
		}
		if match {
			result = append(result, tag)
		vTags = append(vTags, &adp.VTag{
			Name:         tag.Name,
			Labels:       labels,
			ResourceType: string(model.ResourceTypeImage),
		})
	}
	}
	return result, nil
	return vTags, nil
}
@@ -21,6 +21,8 @@ import (
	"strings"
	"sync"

	"github.com/goharbor/harbor/src/replication/filter"

	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/goharbor/harbor/src/common/http/modifier"

@@ -50,6 +52,59 @@ type ImageRegistry interface {
	PushBlob(repository, digest string, size int64, blob io.Reader) error
}

// Repository defines an repository object, it can be image repository, chart repository and etc.
type Repository struct {
	ResourceType string `json:"resource_type"`
	Name         string `json:"name"`
}

// GetName returns the name
func (r *Repository) GetName() string {
	return r.Name
}

// GetFilterableType returns the filterable type
func (r *Repository) GetFilterableType() filter.FilterableType {
	return filter.FilterableTypeRepository
}

// GetResourceType returns the resource type
func (r *Repository) GetResourceType() string {
	return r.ResourceType
}

// GetLabels returns the labels
func (r *Repository) GetLabels() []string {
	return nil
}

// VTag defines an vTag object, it can be image tag, chart version and etc.
type VTag struct {
	ResourceType string   `json:"resource_type"`
	Name         string   `json:"name"`
	Labels       []string `json:"labels"`
}

// GetFilterableType returns the filterable type
func (v *VTag) GetFilterableType() filter.FilterableType {
	return filter.FilterableTypeVTag
}

// GetResourceType returns the resource type
func (v *VTag) GetResourceType() string {
	return v.ResourceType
}

// GetName returns the name
func (v *VTag) GetName() string {
	return v.Name
}

// GetLabels returns the labels
func (v *VTag) GetLabels() []string {
	return v.Labels
}

// DefaultImageRegistry provides a default implementation for interface ImageRegistry
type DefaultImageRegistry struct {
	sync.RWMutex
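As a side note (not part of the diff itself): because `*Repository` and `*VTag` implement all four accessor methods above, they satisfy the new `filter.Filterable` interface, which is what lets the adapters hand them straight to `filter.DoFilter`. A minimal compile-time sketch makes that relationship explicit; the package alias `adp` and the standalone `main` package are assumptions used only for illustration:

```go
package main

import (
	adp "github.com/goharbor/harbor/src/replication/adapter"
	"github.com/goharbor/harbor/src/replication/filter"
)

// Compile-time assertions: both adapter types are usable as filter candidates.
// Note that GetLabels on *Repository returns nil, so label filters only ever
// narrow down vtags, never repositories.
var (
	_ filter.Filterable = (*adp.Repository)(nil)
	_ filter.Filterable = (*adp.VTag)(nil)
)

func main() {}
```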
@@ -33,7 +33,7 @@ func TestMain(m *testing.M) {
		"harbor_label", "harbor_resource_label", "harbor_user", "img_scan_job", "img_scan_overview",
		"job_log", "project", "project_member", "project_metadata", "properties", "registry",
		"replication_policy", "repository", "robot", "role", "schema_migrations", "user_group",
		"replication_execution", "replication_task", "replication_schedule_job", "oidc_user";`,
		"replication_execution", "replication_task", "replication_schedule_job", "oidc_user", "cve_whitelist";`,
		`DROP FUNCTION "update_update_time_at_column"();`,
	}
	dao.PrepareTestData(clearSqls, nil)
src/replication/filter/filter.go (new file, 247 lines)
@@ -0,0 +1,247 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package filter

import (
	"errors"
	"reflect"

	"github.com/goharbor/harbor/src/common/utils/log"
	"github.com/goharbor/harbor/src/replication/util"
)

// const definitions
const (
	FilterableTypeRepository = "repository"
	FilterableTypeVTag       = "vtag"
)

// FilterableType specifies the type of the filterable
type FilterableType string

// Filterable defines the methods that a filterable object must implement
type Filterable interface {
	// return what the type of the filterable object is(repository or vtag)
	GetFilterableType() FilterableType
	// return the resource type of the filterable object(image, chart, ...)
	GetResourceType() string
	GetName() string
	GetLabels() []string
}

// Filter defines the methods that a filter must implement
type Filter interface {
	// return whether the filter is applied to the specified Filterable
	ApplyTo(Filterable) bool
	Filter(...Filterable) ([]Filterable, error)
}

// NewResourceTypeFilter return a Filter to filter candidates according to the resource type
func NewResourceTypeFilter(resourceType string) Filter {
	return &resourceTypeFilter{
		resourceType: resourceType,
	}
}

// NewRepositoryNameFilter return a Filter to filter the repositories according to the name
func NewRepositoryNameFilter(pattern string) Filter {
	return &nameFilter{
		filterableType: FilterableTypeRepository,
		pattern:        pattern,
	}
}

// NewVTagNameFilter return a Filter to filter the vtags according to the name
func NewVTagNameFilter(pattern string) Filter {
	return &nameFilter{
		filterableType: FilterableTypeVTag,
		pattern:        pattern,
	}
}

// NewVTagLabelFilter return a Filter to filter vtags according to the label
func NewVTagLabelFilter(label string) Filter {
	return &labelFilter{
		label: label,
	}
}

type resourceTypeFilter struct {
	resourceType string
}

func (r *resourceTypeFilter) ApplyTo(filterable Filterable) bool {
	if filterable == nil {
		return false
	}
	switch filterable.GetFilterableType() {
	case FilterableTypeRepository, FilterableTypeVTag:
		return true
	default:
		return false
	}
}

func (r *resourceTypeFilter) Filter(filterables ...Filterable) ([]Filterable, error) {
	result := []Filterable{}
	for _, filterable := range filterables {
		if filterable.GetResourceType() == r.resourceType {
			result = append(result, filterable)
		}
	}
	return result, nil
}

type nameFilter struct {
	filterableType FilterableType
	pattern        string
}

func (n *nameFilter) ApplyTo(filterable Filterable) bool {
	if filterable == nil {
		return false
	}
	if filterable.GetFilterableType() == n.filterableType {
		return true
	}
	return false
}

func (n *nameFilter) Filter(filterables ...Filterable) ([]Filterable, error) {
	result := []Filterable{}
	for _, filterable := range filterables {
		name := filterable.GetName()
		match, err := util.Match(n.pattern, name)
		if err != nil {
			return nil, err
		}
		if match {
			log.Debugf("%q matches the pattern %q of name filter", name, n.pattern)
			result = append(result, filterable)
			continue
		}
		log.Debugf("%q doesn't match the pattern %q of name filter, skip", name, n.pattern)
	}
	return result, nil
}

type labelFilter struct {
	label string
}

func (l *labelFilter) ApplyTo(filterable Filterable) bool {
	if filterable == nil {
		return false
	}
	if filterable.GetFilterableType() == FilterableTypeVTag {
		return true
	}
	return false
}

func (l *labelFilter) Filter(filterables ...Filterable) ([]Filterable, error) {
	// if no specified label in the filter, just returns the input filterable
	// candidate as the result
	if len(l.label) == 0 {
		return filterables, nil
	}
	result := []Filterable{}
	for _, filterable := range filterables {
		match := false
		for _, label := range filterable.GetLabels() {
			if label == l.label {
				match = true
				break
			}
		}
		if match {
			result = append(result, filterable)
		}
	}
	return result, nil
}

// DoFilter is a util function to help filter filterables easily.
// The parameter "filterables" must be a pointer points to a slice
// whose elements must be Filterable. After applying all the "filters"
// to the "filterables", the result is put back into the variable
// "filterables"
func DoFilter(filterables interface{}, filters ...Filter) error {
	if filterables == nil || len(filters) == 0 {
		return nil
	}

	value := reflect.ValueOf(filterables)
	// make sure the input is a pointer
	if value.Kind() != reflect.Ptr {
		return errors.New("the type of input should be pointer to a Filterable slice")
	}

	sliceValue := value.Elem()
	// make sure the input is a pointer points to a slice
	if sliceValue.Type().Kind() != reflect.Slice {
		return errors.New("the type of input should be pointer to a Filterable slice")
	}

	filterableType := reflect.TypeOf((*Filterable)(nil)).Elem()
	elemType := sliceValue.Type().Elem()
	// make sure the input is a pointer points to a Filterable slice
	if !elemType.Implements(filterableType) {
		return errors.New("the type of input should be pointer to a Filterable slice")
	}

	// convert the input to Filterable slice
	items := []Filterable{}
	for i := 0; i < sliceValue.Len(); i++ {
		items = append(items, sliceValue.Index(i).Interface().(Filterable))
	}

	// do filter
	var err error
	items, err = doFilter(items, filters...)
	if err != nil {
		return err
	}

	// convert back to the origin type
	result := reflect.MakeSlice(reflect.SliceOf(elemType), 0, len(items))
	for _, item := range items {
		result = reflect.Append(result, reflect.ValueOf(item))
	}
	value.Elem().Set(result)

	return nil
}

func doFilter(filterables []Filterable, filters ...Filter) ([]Filterable, error) {
	var appliedTo, notAppliedTo []Filterable
	var err error
	for _, filter := range filters {
		appliedTo, notAppliedTo = nil, nil
		for _, filterable := range filterables {
			if filter.ApplyTo(filterable) {
				appliedTo = append(appliedTo, filterable)
			} else {
				notAppliedTo = append(notAppliedTo, filterable)
			}
		}
		filterables, err = filter.Filter(appliedTo...)
		if err != nil {
			return nil, err
		}
		filterables = append(filterables, notAppliedTo...)
	}
	return filterables, nil
}
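The contract documented on `DoFilter` above (a pointer to a slice whose element type implements `Filterable`; the slice is rewritten in place) is easiest to see in a small, self-contained sketch. The `demoTag` type and values below are stand-ins invented for illustration and are not part of this commit:

```go
package main

import (
	"fmt"

	"github.com/goharbor/harbor/src/replication/filter"
)

// demoTag is a minimal Filterable used only for this example.
type demoTag struct {
	name   string
	labels []string
}

func (d *demoTag) GetFilterableType() filter.FilterableType { return filter.FilterableTypeVTag }
func (d *demoTag) GetResourceType() string                  { return "image" }
func (d *demoTag) GetName() string                          { return d.name }
func (d *demoTag) GetLabels() []string                      { return d.labels }

func main() {
	tags := []*demoTag{
		{name: "1.0", labels: []string{"production"}},
		{name: "latest", labels: []string{"dev"}},
	}
	// The name filter keeps both tags, the label filter then keeps only the
	// "production" one; DoFilter writes the result back into the tags slice.
	err := filter.DoFilter(&tags,
		filter.NewVTagNameFilter("*"),
		filter.NewVTagLabelFilter("production"))
	if err != nil {
		panic(err)
	}
	for _, t := range tags {
		fmt.Println(t.name) // prints "1.0"
	}
}
```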
src/replication/filter/filter_test.go (new file, 170 lines)
@@ -0,0 +1,170 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package filter

import (
	"reflect"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

type fakeFilterable struct {
	filterableType FilterableType
	resourceType   string
	name           string
	labels         []string
}

func (f *fakeFilterable) GetFilterableType() FilterableType {
	return f.filterableType
}

func (f *fakeFilterable) GetResourceType() string {
	return f.resourceType
}

func (f *fakeFilterable) GetName() string {
	return f.name
}
func (f *fakeFilterable) GetLabels() []string {
	return f.labels
}

func TestFilterOfResourceTypeFilter(t *testing.T) {
	filterable := &fakeFilterable{
		filterableType: FilterableTypeRepository,
		resourceType:   "image",
		name:           "library/hello-world",
	}

	filter := NewResourceTypeFilter("image")
	result, err := filter.Filter(filterable)
	require.Nil(t, nil, err)
	if assert.Equal(t, 1, len(result)) {
		assert.True(t, reflect.DeepEqual(filterable, result[0]))
	}

	filter = NewResourceTypeFilter("chart")
	result, err = filter.Filter(filterable)
	require.Nil(t, nil, err)
	assert.Equal(t, 0, len(result))
}

func TestApplyToOfResourceTypeFilter(t *testing.T) {
	filterable := &fakeFilterable{
		filterableType: FilterableTypeRepository,
	}

	filter := NewResourceTypeFilter("image")
	assert.True(t, filter.ApplyTo(filterable))

	filterable.filterableType = FilterableTypeVTag
	assert.True(t, filter.ApplyTo(filterable))

	filterable.filterableType = FilterableType("unknown")
	assert.False(t, filter.ApplyTo(filterable))
}

func TestFilterOfNameFilter(t *testing.T) {
	filterable := &fakeFilterable{
		name: "foo",
	}
	// pass the filter
	filter := &nameFilter{
		pattern: "*",
	}
	result, err := filter.Filter(filterable)
	require.Nil(t, err)
	if assert.Equal(t, 1, len(result)) {
		assert.True(t, reflect.DeepEqual(filterable, result[0].(*fakeFilterable)))
	}

	// cannot pass the filter
	filter.pattern = "cannotpass"
	result, err = filter.Filter(filterable)
	require.Nil(t, err)
	assert.Equal(t, 0, len(result))
}

func TestApplyToOfNameFilter(t *testing.T) {
	filterable := &fakeFilterable{
		filterableType: FilterableTypeRepository,
	}

	filter := &nameFilter{
		filterableType: FilterableTypeRepository,
	}
	assert.True(t, filter.ApplyTo(filterable))

	filterable.filterableType = FilterableTypeVTag
	assert.False(t, filter.ApplyTo(filterable))
}

func TestFilterOfLabelFilter(t *testing.T) {
	filterable := &fakeFilterable{
		labels: []string{"production"},
	}
	// pass the filter
	filter := &labelFilter{
		label: "production",
	}
	result, err := filter.Filter(filterable)
	require.Nil(t, err)
	if assert.Equal(t, 1, len(result)) {
		assert.True(t, reflect.DeepEqual(filterable, result[0].(*fakeFilterable)))
	}
	// cannot pass the filter
	filter.label = "cannotpass"
	result, err = filter.Filter(filterable)
	require.Nil(t, err)
	assert.Equal(t, 0, len(result))
}

func TestApplyToOfLabelFilter(t *testing.T) {
	filterable := &fakeFilterable{
		filterableType: FilterableTypeRepository,
	}

	filter := labelFilter{}
	assert.False(t, filter.ApplyTo(filterable))

	filterable.filterableType = FilterableTypeVTag
	assert.True(t, filter.ApplyTo(filterable))
}

func TestDoFilter(t *testing.T) {
	tag1 := &fakeFilterable{
		filterableType: FilterableTypeVTag,
		name:           "1.0",
		labels:         []string{"production"},
	}
	tag2 := &fakeFilterable{
		filterableType: FilterableTypeVTag,
		name:           "latest",
		labels:         []string{"dev"},
	}
	filterables := []Filterable{tag1, tag2}
	filters := []Filter{
		NewVTagNameFilter("*"),
		NewVTagLabelFilter("production"),
	}
	err := DoFilter(&filterables, filters...)
	require.Nil(t, err)
	if assert.Equal(t, 1, len(filterables)) {
		assert.True(t, reflect.DeepEqual(tag1, filterables[0]))
	}
}
@@ -18,6 +18,8 @@ import (
	"fmt"
	"time"

	"github.com/goharbor/harbor/src/replication/filter"

	"github.com/astaxie/beego/validation"
	"github.com/goharbor/harbor/src/common/models"
	"github.com/robfig/cron"

@@ -133,6 +135,29 @@ type Filter struct {
	Value interface{} `json:"value"`
}

// DoFilter filter the filterables
// The parameter "filterables" must be a pointer points to a slice
// whose elements must be Filterable. After applying the filter
// to the "filterables", the result is put back into the variable
// "filterables"
func (f *Filter) DoFilter(filterables interface{}) error {
	var ft filter.Filter
	switch f.Type {
	case FilterTypeName:
		ft = filter.NewRepositoryNameFilter(f.Value.(string))
	case FilterTypeTag:
		ft = filter.NewVTagNameFilter(f.Value.(string))
	case FilterTypeLabel:
		ft = filter.NewVTagLabelFilter(f.Value.(string))
	case FilterTypeResource:
		ft = filter.NewResourceTypeFilter(f.Value.(string))
	default:
		return fmt.Errorf("unsupported filter type: %s", f.Type)
	}

	return filter.DoFilter(filterables, ft)
}

// TriggerType represents the type of trigger.
type TriggerType string
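To tie the pieces together, here is a hedged sketch (sample values and the `adp` alias are assumptions for illustration, not part of the commit) of how a replication policy filter defined above ends up calling into the new filter package when an adapter applies it to the vtags it collected:

```go
package main

import (
	"fmt"

	adp "github.com/goharbor/harbor/src/replication/adapter"
	"github.com/goharbor/harbor/src/replication/model"
)

func main() {
	// Tags as an adapter would report them (sample values for illustration).
	vTags := []*adp.VTag{
		{Name: "1.0", ResourceType: string(model.ResourceTypeImage), Labels: []string{"production"}},
		{Name: "latest", ResourceType: string(model.ResourceTypeImage), Labels: []string{"dev"}},
	}
	// A policy filter of type "label": DoFilter builds a filter.NewVTagLabelFilter
	// under the hood and rewrites the slice in place.
	f := &model.Filter{Type: model.FilterTypeLabel, Value: "production"}
	if err := f.DoFilter(&vTags); err != nil {
		panic(err)
	}
	fmt.Println(len(vTags)) // 1: only the tag labeled "production" remains
}
```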
@@ -27,9 +27,11 @@ const (
	RegistryTypeDockerRegistry RegistryType = "docker-registry"
	RegistryTypeHuawei         RegistryType = "huawei-SWR"
	RegistryTypeGoogleGcr      RegistryType = "google-gcr"
	RegistryTypeAwsEcr         RegistryType = "aws-ecr"

	FilterStyleTypeText  = "input"
	FilterStyleTypeRadio = "radio"
	FilterStyleTypeList  = "list"
)

// RegistryType indicates the type of registry
@@ -258,7 +258,7 @@ func parseFilters(str string) ([]*model.Filter, error) {
	case "tag":
		filter.Type = model.FilterTypeTag
	case "label":
		// TODO if we support the label filter, remove the checking logic here
		// drop all legacy label filters
		continue
	default:
		log.Warningf("unknown filter type: %s", filter.Type)
@@ -37,6 +37,8 @@ import (
	_ "github.com/goharbor/harbor/src/replication/adapter/huawei"
	// register the Google Gcr adapter
	_ "github.com/goharbor/harbor/src/replication/adapter/googlegcr"
	// register the AwsEcr adapter
	_ "github.com/goharbor/harbor/src/replication/adapter/awsecr"
)

var (
src/vendor/github.com/aws/aws-sdk-go/LICENSE.txt (generated, vendored, new file, 202 lines)
src/vendor/github.com/aws/aws-sdk-go/NOTICE.txt (generated, vendored, new file, 3 lines)
@@ -0,0 +1,3 @@
AWS SDK for Go
Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Copyright 2014-2015 Stripe, Inc.
src/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go (generated, vendored, new file, 164 lines)
src/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go (generated, vendored, new file, 221 lines)
|
||||
package awserr
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// SprintError returns a string of the formatted error code.
|
||||
//
|
||||
// Both extra and origErr are optional. If they are included their lines
|
||||
// will be added, but if they are not included their lines will be ignored.
|
||||
func SprintError(code, message, extra string, origErr error) string {
|
||||
msg := fmt.Sprintf("%s: %s", code, message)
|
||||
if extra != "" {
|
||||
msg = fmt.Sprintf("%s\n\t%s", msg, extra)
|
||||
}
|
||||
if origErr != nil {
|
||||
msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error())
|
||||
}
|
||||
return msg
|
||||
}
|
||||
|
||||
// A baseError wraps the code and message which defines an error. It also
|
||||
// can be used to wrap an original error object.
|
||||
//
|
||||
// Should be used as the root for errors satisfying the awserr.Error. Also
|
||||
// for any error which does not fit into a specific error wrapper type.
|
||||
type baseError struct {
|
||||
// Classification of error
|
||||
code string
|
||||
|
||||
// Detailed information about error
|
||||
message string
|
||||
|
||||
// Optional original error this error is based off of. Allows building
|
||||
// chained errors.
|
||||
errs []error
|
||||
}
|
||||
|
||||
// newBaseError returns an error object for the code, message, and errors.
|
||||
//
|
||||
// code is a short no whitespace phrase depicting the classification of
|
||||
// the error that is being created.
|
||||
//
|
||||
// message is the free flow string containing detailed information about the
|
||||
// error.
|
||||
//
|
||||
// origErrs is the error objects which will be nested under the new errors to
|
||||
// be returned.
|
||||
func newBaseError(code, message string, origErrs []error) *baseError {
|
||||
b := &baseError{
|
||||
code: code,
|
||||
message: message,
|
||||
errs: origErrs,
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// Error returns the string representation of the error.
|
||||
//
|
||||
// See ErrorWithExtra for formatting.
|
||||
//
|
||||
// Satisfies the error interface.
|
||||
func (b baseError) Error() string {
|
||||
size := len(b.errs)
|
||||
if size > 0 {
|
||||
return SprintError(b.code, b.message, "", errorList(b.errs))
|
||||
}
|
||||
|
||||
return SprintError(b.code, b.message, "", nil)
|
||||
}
|
||||
|
||||
// String returns the string representation of the error.
|
||||
// Alias for Error to satisfy the stringer interface.
|
||||
func (b baseError) String() string {
|
||||
return b.Error()
|
||||
}
|
||||
|
||||
// Code returns the short phrase depicting the classification of the error.
|
||||
func (b baseError) Code() string {
	return b.code
}

// Message returns the error details message.
func (b baseError) Message() string {
	return b.message
}

// OrigErr returns the original error if one was set. Nil is returned if no
// error was set. This only returns the first element in the list. If the full
// list is needed, use BatchedErrors.
func (b baseError) OrigErr() error {
	switch len(b.errs) {
	case 0:
		return nil
	case 1:
		return b.errs[0]
	default:
		if err, ok := b.errs[0].(Error); ok {
			return NewBatchError(err.Code(), err.Message(), b.errs[1:])
		}
		return NewBatchError("BatchedErrors",
			"multiple errors occurred", b.errs)
	}
}

// OrigErrs returns the original errors if one was set. An empty slice is
// returned if no error was set.
func (b baseError) OrigErrs() []error {
	return b.errs
}

// So that the Error interface type can be included as an anonymous field
// in the requestError struct and not conflict with the error.Error() method.
type awsError Error

// A requestError wraps a request or service error.
//
// Composed of baseError for code, message, and original error.
type requestError struct {
	awsError
	statusCode int
	requestID  string
	bytes      []byte
}

// newRequestError returns a wrapped error with additional information for
// request status code, and service requestID.
//
// Should be used to wrap all request which involve service requests. Even if
// the request failed without a service response, but had an HTTP status code
// that may be meaningful.
//
// Also wraps original errors via the baseError.
func newRequestError(err Error, statusCode int, requestID string) *requestError {
	return &requestError{
		awsError:   err,
		statusCode: statusCode,
		requestID:  requestID,
	}
}

// Error returns the string representation of the error.
// Satisfies the error interface.
func (r requestError) Error() string {
	extra := fmt.Sprintf("status code: %d, request id: %s",
		r.statusCode, r.requestID)
	return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
}

// String returns the string representation of the error.
// Alias for Error to satisfy the stringer interface.
func (r requestError) String() string {
	return r.Error()
}

// StatusCode returns the wrapped status code for the error
func (r requestError) StatusCode() int {
	return r.statusCode
}

// RequestID returns the wrapped requestID
func (r requestError) RequestID() string {
	return r.requestID
}

// OrigErrs returns the original errors if one was set. An empty slice is
// returned if no error was set.
func (r requestError) OrigErrs() []error {
	if b, ok := r.awsError.(BatchedErrors); ok {
		return b.OrigErrs()
	}
	return []error{r.OrigErr()}
}

type unmarshalError struct {
	awsError
	bytes []byte
}

// Error returns the string representation of the error.
// Satisfies the error interface.
func (e unmarshalError) Error() string {
	extra := hex.Dump(e.bytes)
	return SprintError(e.Code(), e.Message(), extra, e.OrigErr())
}

// String returns the string representation of the error.
// Alias for Error to satisfy the stringer interface.
func (e unmarshalError) String() string {
	return e.Error()
}

// Bytes returns the bytes that failed to unmarshal.
func (e unmarshalError) Bytes() []byte {
	return e.bytes
}

// An error list that satisfies the golang interface
type errorList []error

// Error returns the string representation of the error.
//
// Satisfies the error interface.
func (e errorList) Error() string {
	msg := ""
	// How do we want to handle the array size being zero
	if size := len(e); size > 0 {
		for i := 0; i < size; i++ {
			msg += fmt.Sprintf("%s", e[i].Error())
			// We check the next index to see if it is within the slice.
			// If it is, then we append a newline. We do this, because unit tests
			// could be broken with the additional '\n'
			if i+1 < size {
				msg += "\n"
			}
		}
	}
	return msg
}
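The types above back the public `awserr` interfaces, so calling code never touches `baseError` or `requestError` directly. A minimal sketch (not part of the vendored file) of how these errors are typically inspected; the error code and request id values are illustrative:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

// handleAWSError shows the usual type-assertion pattern: every SDK error
// implements awserr.Error, and errors carrying an HTTP response also
// implement awserr.RequestFailure (backed by requestError above).
func handleAWSError(err error) {
	if aerr, ok := err.(awserr.Error); ok {
		fmt.Println("code:", aerr.Code())
		fmt.Println("message:", aerr.Message())

		if reqErr, ok := err.(awserr.RequestFailure); ok {
			fmt.Println("status:", reqErr.StatusCode())
			fmt.Println("request id:", reqErr.RequestID())
		}
		return
	}
	fmt.Println("non-AWS error:", err)
}

func main() {
	// awserr.New builds a baseError; NewRequestFailure wraps it with HTTP details.
	base := awserr.New("ThrottlingException", "rate exceeded", nil)
	handleAWSError(awserr.NewRequestFailure(base, 429, "req-123"))
}
```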
src/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go (generated, vendored; new file, 108 lines)
@@ -0,0 +1,108 @@
package awsutil

import (
	"io"
	"reflect"
	"time"
)

// Copy deeply copies a src structure to dst. Useful for copying request and
// response structures.
//
// Can copy between structs of different type, but will only copy fields which
// are assignable, and exist in both structs. Fields which are not assignable,
// or do not exist in both structs are ignored.
func Copy(dst, src interface{}) {
	dstval := reflect.ValueOf(dst)
	if !dstval.IsValid() {
		panic("Copy dst cannot be nil")
	}

	rcopy(dstval, reflect.ValueOf(src), true)
}

// CopyOf returns a copy of src while also allocating the memory for dst.
// src must be a pointer type or this operation will fail.
func CopyOf(src interface{}) (dst interface{}) {
	dsti := reflect.New(reflect.TypeOf(src).Elem())
	dst = dsti.Interface()
	rcopy(dsti, reflect.ValueOf(src), true)
	return
}

// rcopy performs a recursive copy of values from the source to destination.
//
// root is used to skip certain aspects of the copy which are not valid
// for the root node of a object.
func rcopy(dst, src reflect.Value, root bool) {
	if !src.IsValid() {
		return
	}

	switch src.Kind() {
	case reflect.Ptr:
		if _, ok := src.Interface().(io.Reader); ok {
			if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
				dst.Elem().Set(src)
			} else if dst.CanSet() {
				dst.Set(src)
			}
		} else {
			e := src.Type().Elem()
			if dst.CanSet() && !src.IsNil() {
				if _, ok := src.Interface().(*time.Time); !ok {
					dst.Set(reflect.New(e))
				} else {
					tempValue := reflect.New(e)
					tempValue.Elem().Set(src.Elem())
					// Sets time.Time's unexported values
					dst.Set(tempValue)
				}
			}
			if src.Elem().IsValid() {
				// Keep the current root state since the depth hasn't changed
				rcopy(dst.Elem(), src.Elem(), root)
			}
		}
	case reflect.Struct:
		t := dst.Type()
		for i := 0; i < t.NumField(); i++ {
			name := t.Field(i).Name
			srcVal := src.FieldByName(name)
			dstVal := dst.FieldByName(name)
			if srcVal.IsValid() && dstVal.CanSet() {
				rcopy(dstVal, srcVal, false)
			}
		}
	case reflect.Slice:
		if src.IsNil() {
			break
		}

		s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
		dst.Set(s)
		for i := 0; i < src.Len(); i++ {
			rcopy(dst.Index(i), src.Index(i), false)
		}
	case reflect.Map:
		if src.IsNil() {
			break
		}

		s := reflect.MakeMap(src.Type())
		dst.Set(s)
		for _, k := range src.MapKeys() {
			v := src.MapIndex(k)
			v2 := reflect.New(v.Type()).Elem()
			rcopy(v2, v, false)
			dst.SetMapIndex(k, v2)
		}
	default:
		// Assign the value if possible. If its not assignable, the value would
		// need to be converted and the impact of that may be unexpected, or is
		// not compatible with the dst type.
		if src.Type().AssignableTo(dst.Type()) {
			dst.Set(src)
		}
	}
}
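As a quick illustration of the deep-copy helpers above (a sketch, not part of the vendored file; the `params` type and field values are made up), `Copy` fills an existing destination while `CopyOf` allocates the copy, and mutating the copy never touches the source:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awsutil"
)

type params struct {
	Bucket *string
	Tags   map[string]string
}

func main() {
	b := "my-bucket"
	src := &params{Bucket: &b, Tags: map[string]string{"env": "dev"}}

	// Copy into an existing destination value.
	var dst params
	awsutil.Copy(&dst, src)

	// Or allocate the copy in one call; CopyOf requires a pointer source.
	clone := awsutil.CopyOf(src).(*params)

	// The nested pointer was re-allocated, so changing the clone leaves
	// the original and the first copy untouched.
	*clone.Bucket = "other-bucket"
	fmt.Println(*src.Bucket, *dst.Bucket, *clone.Bucket) // my-bucket my-bucket other-bucket
}
```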
src/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go (generated, vendored; new file, 27 lines)
@@ -0,0 +1,27 @@
package awsutil

import (
	"reflect"
)

// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual.
// In addition to this, this method will also dereference the input values if
// possible so the DeepEqual performed will not fail if one parameter is a
// pointer and the other is not.
//
// DeepEqual will not perform indirection of nested values of the input parameters.
func DeepEqual(a, b interface{}) bool {
	ra := reflect.Indirect(reflect.ValueOf(a))
	rb := reflect.Indirect(reflect.ValueOf(b))

	if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
		// If the elements are both nil, and of the same type they are equal
		// If they are of different types they are not equal
		return reflect.TypeOf(a) == reflect.TypeOf(b)
	} else if raValid != rbValid {
		// Both values must be valid to be equal
		return false
	}

	return reflect.DeepEqual(ra.Interface(), rb.Interface())
}
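A short usage sketch (illustrative, not part of the vendored file): the top-level indirection is what makes `DeepEqual` convenient for the SDK's pointer-heavy request and response types:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awsutil"
)

func main() {
	// A *string and a plain string compare equal once the pointer is dereferenced.
	fmt.Println(awsutil.DeepEqual(aws.String("us-west-2"), "us-west-2")) // true

	// A nil pointer dereferences to an invalid value, so it is not equal
	// to a concrete string.
	var region *string
	fmt.Println(awsutil.DeepEqual(region, "us-west-2")) // false
}
```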
src/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go (generated, vendored; new file, 222 lines)
@@ -0,0 +1,222 @@
|
||||
package awsutil
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/jmespath/go-jmespath"
|
||||
)
|
||||
|
||||
var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
|
||||
|
||||
// rValuesAtPath returns a slice of values found in value v. The values
|
||||
// in v are explored recursively so all nested values are collected.
|
||||
func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value {
|
||||
pathparts := strings.Split(path, "||")
|
||||
if len(pathparts) > 1 {
|
||||
for _, pathpart := range pathparts {
|
||||
vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm)
|
||||
if len(vals) > 0 {
|
||||
return vals
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))}
|
||||
components := strings.Split(path, ".")
|
||||
for len(values) > 0 && len(components) > 0 {
|
||||
var index *int64
|
||||
var indexStar bool
|
||||
c := strings.TrimSpace(components[0])
|
||||
if c == "" { // no actual component, illegal syntax
|
||||
return nil
|
||||
} else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] {
|
||||
// TODO normalize case for user
|
||||
return nil // don't support unexported fields
|
||||
}
|
||||
|
||||
// parse this component
|
||||
if m := indexRe.FindStringSubmatch(c); m != nil {
|
||||
c = m[1]
|
||||
if m[2] == "" {
|
||||
index = nil
|
||||
indexStar = true
|
||||
} else {
|
||||
i, _ := strconv.ParseInt(m[2], 10, 32)
|
||||
index = &i
|
||||
indexStar = false
|
||||
}
|
||||
}
|
||||
|
||||
nextvals := []reflect.Value{}
|
||||
for _, value := range values {
|
||||
// pull component name out of struct member
|
||||
if value.Kind() != reflect.Struct {
|
||||
continue
|
||||
}
|
||||
|
||||
if c == "*" { // pull all members
|
||||
for i := 0; i < value.NumField(); i++ {
|
||||
if f := reflect.Indirect(value.Field(i)); f.IsValid() {
|
||||
nextvals = append(nextvals, f)
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
value = value.FieldByNameFunc(func(name string) bool {
|
||||
if c == name {
|
||||
return true
|
||||
} else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
})
|
||||
|
||||
if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 {
|
||||
if !value.IsNil() {
|
||||
value.Set(reflect.Zero(value.Type()))
|
||||
}
|
||||
return []reflect.Value{value}
|
||||
}
|
||||
|
||||
if createPath && value.Kind() == reflect.Ptr && value.IsNil() {
|
||||
// TODO if the value is the terminus it should not be created
|
||||
// if the value to be set to its position is nil.
|
||||
value.Set(reflect.New(value.Type().Elem()))
|
||||
value = value.Elem()
|
||||
} else {
|
||||
value = reflect.Indirect(value)
|
||||
}
|
||||
|
||||
if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
|
||||
if !createPath && value.IsNil() {
|
||||
value = reflect.ValueOf(nil)
|
||||
}
|
||||
}
|
||||
|
||||
if value.IsValid() {
|
||||
nextvals = append(nextvals, value)
|
||||
}
|
||||
}
|
||||
values = nextvals
|
||||
|
||||
if indexStar || index != nil {
|
||||
nextvals = []reflect.Value{}
|
||||
for _, valItem := range values {
|
||||
value := reflect.Indirect(valItem)
|
||||
if value.Kind() != reflect.Slice {
|
||||
continue
|
||||
}
|
||||
|
||||
if indexStar { // grab all indices
|
||||
for i := 0; i < value.Len(); i++ {
|
||||
idx := reflect.Indirect(value.Index(i))
|
||||
if idx.IsValid() {
|
||||
nextvals = append(nextvals, idx)
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// pull out index
|
||||
i := int(*index)
|
||||
if i >= value.Len() { // check out of bounds
|
||||
if createPath {
|
||||
// TODO resize slice
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
} else if i < 0 { // support negative indexing
|
||||
i = value.Len() + i
|
||||
}
|
||||
value = reflect.Indirect(value.Index(i))
|
||||
|
||||
if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
|
||||
if !createPath && value.IsNil() {
|
||||
value = reflect.ValueOf(nil)
|
||||
}
|
||||
}
|
||||
|
||||
if value.IsValid() {
|
||||
nextvals = append(nextvals, value)
|
||||
}
|
||||
}
|
||||
values = nextvals
|
||||
}
|
||||
|
||||
components = components[1:]
|
||||
}
|
||||
return values
|
||||
}
|
||||
|
||||
// ValuesAtPath returns a list of values at the case insensitive lexical
|
||||
// path inside of a structure.
|
||||
func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
|
||||
result, err := jmespath.Search(path, i)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
v := reflect.ValueOf(result)
|
||||
if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) {
|
||||
return nil, nil
|
||||
}
|
||||
if s, ok := result.([]interface{}); ok {
|
||||
return s, err
|
||||
}
|
||||
if v.Kind() == reflect.Map && v.Len() == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
if v.Kind() == reflect.Slice {
|
||||
out := make([]interface{}, v.Len())
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
out[i] = v.Index(i).Interface()
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
return []interface{}{result}, nil
|
||||
}
|
||||
|
||||
// SetValueAtPath sets a value at the case insensitive lexical path inside
|
||||
// of a structure.
|
||||
func SetValueAtPath(i interface{}, path string, v interface{}) {
|
||||
if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil {
|
||||
for _, rval := range rvals {
|
||||
if rval.Kind() == reflect.Ptr && rval.IsNil() {
|
||||
continue
|
||||
}
|
||||
setValue(rval, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func setValue(dstVal reflect.Value, src interface{}) {
|
||||
if dstVal.Kind() == reflect.Ptr {
|
||||
dstVal = reflect.Indirect(dstVal)
|
||||
}
|
||||
srcVal := reflect.ValueOf(src)
|
||||
|
||||
if !srcVal.IsValid() { // src is literal nil
|
||||
if dstVal.CanAddr() {
|
||||
// Convert to pointer so that pointer's value can be nil'ed
|
||||
// dstVal = dstVal.Addr()
|
||||
}
|
||||
dstVal.Set(reflect.Zero(dstVal.Type()))
|
||||
|
||||
} else if srcVal.Kind() == reflect.Ptr {
|
||||
if srcVal.IsNil() {
|
||||
srcVal = reflect.Zero(dstVal.Type())
|
||||
} else {
|
||||
srcVal = reflect.ValueOf(src).Elem()
|
||||
}
|
||||
dstVal.Set(srcVal)
|
||||
} else {
|
||||
dstVal.Set(srcVal)
|
||||
}
|
||||
|
||||
}
|
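For orientation (a sketch under stated assumptions, not part of the vendored file): `ValuesAtPath` evaluates a JMESPath expression against a response struct, while `SetValueAtPath` uses the reflective path walker above to assign through nested, case-insensitive field paths. The `object`/`listOutput` types below are made up stand-ins for SDK output shapes:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awsutil"
)

type object struct{ Key *string }
type listOutput struct{ Contents []*object }

func main() {
	k1, k2 := "a.txt", "b.txt"
	out := &listOutput{Contents: []*object{{Key: &k1}, {Key: &k2}}}

	// ValuesAtPath runs the JMESPath query against the struct and returns
	// the matched values (here, the two *string Key fields).
	keys, err := awsutil.ValuesAtPath(out, "Contents[].Key")
	fmt.Println(len(keys), err)

	// SetValueAtPath walks the field path (creating intermediate pointers
	// when needed) and assigns the new value in place.
	awsutil.SetValueAtPath(out, "Contents[0].Key", "renamed.txt")
	fmt.Println(*out.Contents[0].Key) // renamed.txt
}
```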
src/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go (generated, vendored; new file, 113 lines)
@@ -0,0 +1,113 @@
|
||||
package awsutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Prettify returns the string representation of a value.
|
||||
func Prettify(i interface{}) string {
|
||||
var buf bytes.Buffer
|
||||
prettify(reflect.ValueOf(i), 0, &buf)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// prettify will recursively walk value v to build a textual
|
||||
// representation of the value.
|
||||
func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
|
||||
for v.Kind() == reflect.Ptr {
|
||||
v = v.Elem()
|
||||
}
|
||||
|
||||
switch v.Kind() {
|
||||
case reflect.Struct:
|
||||
strtype := v.Type().String()
|
||||
if strtype == "time.Time" {
|
||||
fmt.Fprintf(buf, "%s", v.Interface())
|
||||
break
|
||||
} else if strings.HasPrefix(strtype, "io.") {
|
||||
buf.WriteString("<buffer>")
|
||||
break
|
||||
}
|
||||
|
||||
buf.WriteString("{\n")
|
||||
|
||||
names := []string{}
|
||||
for i := 0; i < v.Type().NumField(); i++ {
|
||||
name := v.Type().Field(i).Name
|
||||
f := v.Field(i)
|
||||
if name[0:1] == strings.ToLower(name[0:1]) {
|
||||
continue // ignore unexported fields
|
||||
}
|
||||
if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
|
||||
continue // ignore unset fields
|
||||
}
|
||||
names = append(names, name)
|
||||
}
|
||||
|
||||
for i, n := range names {
|
||||
val := v.FieldByName(n)
|
||||
buf.WriteString(strings.Repeat(" ", indent+2))
|
||||
buf.WriteString(n + ": ")
|
||||
prettify(val, indent+2, buf)
|
||||
|
||||
if i < len(names)-1 {
|
||||
buf.WriteString(",\n")
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
|
||||
case reflect.Slice:
|
||||
strtype := v.Type().String()
|
||||
if strtype == "[]uint8" {
|
||||
fmt.Fprintf(buf, "<binary> len %d", v.Len())
|
||||
break
|
||||
}
|
||||
|
||||
nl, id, id2 := "", "", ""
|
||||
if v.Len() > 3 {
|
||||
nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
|
||||
}
|
||||
buf.WriteString("[" + nl)
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
buf.WriteString(id2)
|
||||
prettify(v.Index(i), indent+2, buf)
|
||||
|
||||
if i < v.Len()-1 {
|
||||
buf.WriteString("," + nl)
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteString(nl + id + "]")
|
||||
case reflect.Map:
|
||||
buf.WriteString("{\n")
|
||||
|
||||
for i, k := range v.MapKeys() {
|
||||
buf.WriteString(strings.Repeat(" ", indent+2))
|
||||
buf.WriteString(k.String() + ": ")
|
||||
prettify(v.MapIndex(k), indent+2, buf)
|
||||
|
||||
if i < v.Len()-1 {
|
||||
buf.WriteString(",\n")
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
|
||||
default:
|
||||
if !v.IsValid() {
|
||||
fmt.Fprint(buf, "<invalid value>")
|
||||
return
|
||||
}
|
||||
format := "%v"
|
||||
switch v.Interface().(type) {
|
||||
case string:
|
||||
format = "%q"
|
||||
case io.ReadSeeker, io.Reader:
|
||||
format = "buffer(%p)"
|
||||
}
|
||||
fmt.Fprintf(buf, format, v.Interface())
|
||||
}
|
||||
}
|
src/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go (generated, vendored; new file, 88 lines)
@@ -0,0 +1,88 @@
|
||||
package awsutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// StringValue returns the string representation of a value.
|
||||
func StringValue(i interface{}) string {
|
||||
var buf bytes.Buffer
|
||||
stringValue(reflect.ValueOf(i), 0, &buf)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
|
||||
for v.Kind() == reflect.Ptr {
|
||||
v = v.Elem()
|
||||
}
|
||||
|
||||
switch v.Kind() {
|
||||
case reflect.Struct:
|
||||
buf.WriteString("{\n")
|
||||
|
||||
for i := 0; i < v.Type().NumField(); i++ {
|
||||
ft := v.Type().Field(i)
|
||||
fv := v.Field(i)
|
||||
|
||||
if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) {
|
||||
continue // ignore unexported fields
|
||||
}
|
||||
if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() {
|
||||
continue // ignore unset fields
|
||||
}
|
||||
|
||||
buf.WriteString(strings.Repeat(" ", indent+2))
|
||||
buf.WriteString(ft.Name + ": ")
|
||||
|
||||
if tag := ft.Tag.Get("sensitive"); tag == "true" {
|
||||
buf.WriteString("<sensitive>")
|
||||
} else {
|
||||
stringValue(fv, indent+2, buf)
|
||||
}
|
||||
|
||||
buf.WriteString(",\n")
|
||||
}
|
||||
|
||||
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
|
||||
case reflect.Slice:
|
||||
nl, id, id2 := "", "", ""
|
||||
if v.Len() > 3 {
|
||||
nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
|
||||
}
|
||||
buf.WriteString("[" + nl)
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
buf.WriteString(id2)
|
||||
stringValue(v.Index(i), indent+2, buf)
|
||||
|
||||
if i < v.Len()-1 {
|
||||
buf.WriteString("," + nl)
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteString(nl + id + "]")
|
||||
case reflect.Map:
|
||||
buf.WriteString("{\n")
|
||||
|
||||
for i, k := range v.MapKeys() {
|
||||
buf.WriteString(strings.Repeat(" ", indent+2))
|
||||
buf.WriteString(k.String() + ": ")
|
||||
stringValue(v.MapIndex(k), indent+2, buf)
|
||||
|
||||
if i < v.Len()-1 {
|
||||
buf.WriteString(",\n")
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
|
||||
default:
|
||||
format := "%v"
|
||||
switch v.Interface().(type) {
|
||||
case string:
|
||||
format = "%q"
|
||||
}
|
||||
fmt.Fprintf(buf, format, v.Interface())
|
||||
}
|
||||
}
|
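Both helpers above produce human-readable dumps of API structs: `Prettify` prints every exported, non-nil field, while `StringValue` additionally masks fields tagged `sensitive:"true"`. A small sketch of typical use (the `createUserInput` type and values are illustrative, not part of the vendored code):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awsutil"
)

type createUserInput struct {
	UserName *string
	Password *string `sensitive:"true"`
}

func main() {
	name, pass := "alice", "hunter2"
	in := &createUserInput{UserName: &name, Password: &pass}

	// Prettify prints all exported, non-nil fields, including the password.
	fmt.Println(awsutil.Prettify(in))

	// StringValue replaces fields tagged sensitive with "<sensitive>".
	fmt.Println(awsutil.StringValue(in))
}
```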
src/vendor/github.com/aws/aws-sdk-go/aws/client/client.go (generated, vendored; new file, 96 lines)
@@ -0,0 +1,96 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/client/metadata"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
)
|
||||
|
||||
// A Config provides configuration to a service client instance.
|
||||
type Config struct {
|
||||
Config *aws.Config
|
||||
Handlers request.Handlers
|
||||
Endpoint string
|
||||
SigningRegion string
|
||||
SigningName string
|
||||
|
||||
// States that the signing name did not come from a modeled source but
|
||||
// was derived based on other data. Used by service client constructors
|
||||
// to determine if the signin name can be overridden based on metadata the
|
||||
// service has.
|
||||
SigningNameDerived bool
|
||||
}
|
||||
|
||||
// ConfigProvider provides a generic way for a service client to receive
|
||||
// the ClientConfig without circular dependencies.
|
||||
type ConfigProvider interface {
|
||||
ClientConfig(serviceName string, cfgs ...*aws.Config) Config
|
||||
}
|
||||
|
||||
// ConfigNoResolveEndpointProvider same as ConfigProvider except it will not
|
||||
// resolve the endpoint automatically. The service client's endpoint must be
|
||||
// provided via the aws.Config.Endpoint field.
|
||||
type ConfigNoResolveEndpointProvider interface {
|
||||
ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config
|
||||
}
|
||||
|
||||
// A Client implements the base client request and response handling
|
||||
// used by all service clients.
|
||||
type Client struct {
|
||||
request.Retryer
|
||||
metadata.ClientInfo
|
||||
|
||||
Config aws.Config
|
||||
Handlers request.Handlers
|
||||
}
|
||||
|
||||
// New will return a pointer to a new initialized service client.
|
||||
func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client {
|
||||
svc := &Client{
|
||||
Config: cfg,
|
||||
ClientInfo: info,
|
||||
Handlers: handlers.Copy(),
|
||||
}
|
||||
|
||||
switch retryer, ok := cfg.Retryer.(request.Retryer); {
|
||||
case ok:
|
||||
svc.Retryer = retryer
|
||||
case cfg.Retryer != nil && cfg.Logger != nil:
|
||||
s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer)
|
||||
cfg.Logger.Log(s)
|
||||
fallthrough
|
||||
default:
|
||||
maxRetries := aws.IntValue(cfg.MaxRetries)
|
||||
if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
|
||||
maxRetries = 3
|
||||
}
|
||||
svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
|
||||
}
|
||||
|
||||
svc.AddDebugHandlers()
|
||||
|
||||
for _, option := range options {
|
||||
option(svc)
|
||||
}
|
||||
|
||||
return svc
|
||||
}
|
||||
|
||||
// NewRequest returns a new Request pointer for the service API
|
||||
// operation and parameters.
|
||||
func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
|
||||
return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data)
|
||||
}
|
||||
|
||||
// AddDebugHandlers injects debug logging handlers into the service to log request
|
||||
// debug information.
|
||||
func (c *Client) AddDebugHandlers() {
|
||||
if !c.Config.LogLevel.AtLeast(aws.LogDebug) {
|
||||
return
|
||||
}
|
||||
|
||||
c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler)
|
||||
c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler)
|
||||
}
|
src/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go (generated, vendored; new file, 116 lines)
@@ -0,0 +1,116 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/internal/sdkrand"
|
||||
)
|
||||
|
||||
// DefaultRetryer implements basic retry logic using exponential backoff for
|
||||
// most services. If you want to implement custom retry logic, implement the
|
||||
// request.Retryer interface or create a structure type that composes this
|
||||
// struct and override the specific methods. For example, to override only
|
||||
// the MaxRetries method:
|
||||
//
|
||||
// type retryer struct {
|
||||
// client.DefaultRetryer
|
||||
// }
|
||||
//
|
||||
// // This implementation always has 100 max retries
|
||||
// func (d retryer) MaxRetries() int { return 100 }
|
||||
type DefaultRetryer struct {
|
||||
NumMaxRetries int
|
||||
}
|
||||
|
||||
// MaxRetries returns the number of maximum returns the service will use to make
|
||||
// an individual API request.
|
||||
func (d DefaultRetryer) MaxRetries() int {
|
||||
return d.NumMaxRetries
|
||||
}
|
||||
|
||||
// RetryRules returns the delay duration before retrying this request again
|
||||
func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
|
||||
// Set the upper limit of delay in retrying at ~five minutes
|
||||
minTime := 30
|
||||
throttle := d.shouldThrottle(r)
|
||||
if throttle {
|
||||
if delay, ok := getRetryDelay(r); ok {
|
||||
return delay
|
||||
}
|
||||
|
||||
minTime = 500
|
||||
}
|
||||
|
||||
retryCount := r.RetryCount
|
||||
if throttle && retryCount > 8 {
|
||||
retryCount = 8
|
||||
} else if retryCount > 13 {
|
||||
retryCount = 13
|
||||
}
|
||||
|
||||
delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime)
|
||||
return time.Duration(delay) * time.Millisecond
|
||||
}
|
||||
|
||||
// ShouldRetry returns true if the request should be retried.
|
||||
func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
|
||||
// If one of the other handlers already set the retry state
|
||||
// we don't want to override it based on the service's state
|
||||
if r.Retryable != nil {
|
||||
return *r.Retryable
|
||||
}
|
||||
|
||||
if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 {
|
||||
return true
|
||||
}
|
||||
return r.IsErrorRetryable() || d.shouldThrottle(r)
|
||||
}
|
||||
|
||||
// ShouldThrottle returns true if the request should be throttled.
|
||||
func (d DefaultRetryer) shouldThrottle(r *request.Request) bool {
|
||||
switch r.HTTPResponse.StatusCode {
|
||||
case 429:
|
||||
case 502:
|
||||
case 503:
|
||||
case 504:
|
||||
default:
|
||||
return r.IsErrorThrottle()
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// This will look in the Retry-After header, RFC 7231, for how long
|
||||
// it will wait before attempting another request
|
||||
func getRetryDelay(r *request.Request) (time.Duration, bool) {
|
||||
if !canUseRetryAfterHeader(r) {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
delayStr := r.HTTPResponse.Header.Get("Retry-After")
|
||||
if len(delayStr) == 0 {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
delay, err := strconv.Atoi(delayStr)
|
||||
if err != nil {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
return time.Duration(delay) * time.Second, true
|
||||
}
|
||||
|
||||
// Will look at the status code to see if the retry header pertains to
|
||||
// the status code.
|
||||
func canUseRetryAfterHeader(r *request.Request) bool {
|
||||
switch r.HTTPResponse.StatusCode {
|
||||
case 429:
|
||||
case 503:
|
||||
default:
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
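The doc comment at the top of this file already names the intended extension point. A minimal sketch of a custom retryer that only raises the retry budget while keeping `DefaultRetryer`'s backoff and throttle handling (the region and retry count are illustrative):

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
)

// patientRetryer composes DefaultRetryer and overrides only MaxRetries, so
// the exponential backoff and throttling checks above are reused unchanged.
type patientRetryer struct {
	client.DefaultRetryer
}

func (patientRetryer) MaxRetries() int { return 10 }

func main() {
	// request.WithRetryer stores the retryer on the config in a type-safe way.
	sess := session.Must(session.NewSession(
		request.WithRetryer(aws.NewConfig().WithRegion("us-east-1"), patientRetryer{}),
	))
	_ = sess // pass sess to a service client constructor, e.g. s3.New(sess)
}
```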
src/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go (generated, vendored; new file, 190 lines)
@@ -0,0 +1,190 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http/httputil"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
)
|
||||
|
||||
const logReqMsg = `DEBUG: Request %s/%s Details:
|
||||
---[ REQUEST POST-SIGN ]-----------------------------
|
||||
%s
|
||||
-----------------------------------------------------`
|
||||
|
||||
const logReqErrMsg = `DEBUG ERROR: Request %s/%s:
|
||||
---[ REQUEST DUMP ERROR ]-----------------------------
|
||||
%s
|
||||
------------------------------------------------------`
|
||||
|
||||
type logWriter struct {
|
||||
// Logger is what we will use to log the payload of a response.
|
||||
Logger aws.Logger
|
||||
// buf stores the contents of what has been read
|
||||
buf *bytes.Buffer
|
||||
}
|
||||
|
||||
func (logger *logWriter) Write(b []byte) (int, error) {
|
||||
return logger.buf.Write(b)
|
||||
}
|
||||
|
||||
type teeReaderCloser struct {
|
||||
// io.Reader will be a tee reader that is used during logging.
|
||||
// This structure will read from a body and write the contents to a logger.
|
||||
io.Reader
|
||||
// Source is used just to close when we are done reading.
|
||||
Source io.ReadCloser
|
||||
}
|
||||
|
||||
func (reader *teeReaderCloser) Close() error {
|
||||
return reader.Source.Close()
|
||||
}
|
||||
|
||||
// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent
|
||||
// to a service. Will include the HTTP request body if the LogLevel of the
|
||||
// request matches LogDebugWithHTTPBody.
|
||||
var LogHTTPRequestHandler = request.NamedHandler{
|
||||
Name: "awssdk.client.LogRequest",
|
||||
Fn: logRequest,
|
||||
}
|
||||
|
||||
func logRequest(r *request.Request) {
|
||||
logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
|
||||
bodySeekable := aws.IsReaderSeekable(r.Body)
|
||||
|
||||
b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)
|
||||
if err != nil {
|
||||
r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
|
||||
r.ClientInfo.ServiceName, r.Operation.Name, err))
|
||||
return
|
||||
}
|
||||
|
||||
if logBody {
|
||||
if !bodySeekable {
|
||||
r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body))
|
||||
}
|
||||
// Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
|
||||
// Body as a NoOpCloser and will not be reset after read by the HTTP
|
||||
// client reader.
|
||||
r.ResetBody()
|
||||
}
|
||||
|
||||
r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
|
||||
r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
|
||||
}
|
||||
|
||||
// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent
|
||||
// to a service. Will only log the HTTP request's headers. The request payload
|
||||
// will not be read.
|
||||
var LogHTTPRequestHeaderHandler = request.NamedHandler{
|
||||
Name: "awssdk.client.LogRequestHeader",
|
||||
Fn: logRequestHeader,
|
||||
}
|
||||
|
||||
func logRequestHeader(r *request.Request) {
|
||||
b, err := httputil.DumpRequestOut(r.HTTPRequest, false)
|
||||
if err != nil {
|
||||
r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
|
||||
r.ClientInfo.ServiceName, r.Operation.Name, err))
|
||||
return
|
||||
}
|
||||
|
||||
r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
|
||||
r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
|
||||
}
|
||||
|
||||
const logRespMsg = `DEBUG: Response %s/%s Details:
|
||||
---[ RESPONSE ]--------------------------------------
|
||||
%s
|
||||
-----------------------------------------------------`
|
||||
|
||||
const logRespErrMsg = `DEBUG ERROR: Response %s/%s:
|
||||
---[ RESPONSE DUMP ERROR ]-----------------------------
|
||||
%s
|
||||
-----------------------------------------------------`
|
||||
|
||||
// LogHTTPResponseHandler is a SDK request handler to log the HTTP response
|
||||
// received from a service. Will include the HTTP response body if the LogLevel
|
||||
// of the request matches LogDebugWithHTTPBody.
|
||||
var LogHTTPResponseHandler = request.NamedHandler{
|
||||
Name: "awssdk.client.LogResponse",
|
||||
Fn: logResponse,
|
||||
}
|
||||
|
||||
func logResponse(r *request.Request) {
|
||||
lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)}
|
||||
|
||||
if r.HTTPResponse == nil {
|
||||
lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
|
||||
r.ClientInfo.ServiceName, r.Operation.Name, "request's HTTPResponse is nil"))
|
||||
return
|
||||
}
|
||||
|
||||
logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
|
||||
if logBody {
|
||||
r.HTTPResponse.Body = &teeReaderCloser{
|
||||
Reader: io.TeeReader(r.HTTPResponse.Body, lw),
|
||||
Source: r.HTTPResponse.Body,
|
||||
}
|
||||
}
|
||||
|
||||
handlerFn := func(req *request.Request) {
|
||||
b, err := httputil.DumpResponse(req.HTTPResponse, false)
|
||||
if err != nil {
|
||||
lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
|
||||
req.ClientInfo.ServiceName, req.Operation.Name, err))
|
||||
return
|
||||
}
|
||||
|
||||
lw.Logger.Log(fmt.Sprintf(logRespMsg,
|
||||
req.ClientInfo.ServiceName, req.Operation.Name, string(b)))
|
||||
|
||||
if logBody {
|
||||
b, err := ioutil.ReadAll(lw.buf)
|
||||
if err != nil {
|
||||
lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
|
||||
req.ClientInfo.ServiceName, req.Operation.Name, err))
|
||||
return
|
||||
}
|
||||
|
||||
lw.Logger.Log(string(b))
|
||||
}
|
||||
}
|
||||
|
||||
const handlerName = "awsdk.client.LogResponse.ResponseBody"
|
||||
|
||||
r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{
|
||||
Name: handlerName, Fn: handlerFn,
|
||||
})
|
||||
r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{
|
||||
Name: handlerName, Fn: handlerFn,
|
||||
})
|
||||
}
|
||||
|
||||
// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP
|
||||
// response received from a service. Will only log the HTTP response's headers.
|
||||
// The response payload will not be read.
|
||||
var LogHTTPResponseHeaderHandler = request.NamedHandler{
|
||||
Name: "awssdk.client.LogResponseHeader",
|
||||
Fn: logResponseHeader,
|
||||
}
|
||||
|
||||
func logResponseHeader(r *request.Request) {
|
||||
if r.Config.Logger == nil {
|
||||
return
|
||||
}
|
||||
|
||||
b, err := httputil.DumpResponse(r.HTTPResponse, false)
|
||||
if err != nil {
|
||||
r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg,
|
||||
r.ClientInfo.ServiceName, r.Operation.Name, err))
|
||||
return
|
||||
}
|
||||
|
||||
r.Config.Logger.Log(fmt.Sprintf(logRespMsg,
|
||||
r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
|
||||
}
|
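These handlers only fire when the configured log level asks for them. A sketch of turning on full request/response dumps for a session (region value is illustrative):

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// LogDebugWithHTTPBody makes the handlers above dump the full wire
	// traffic; plain LogDebug logs requests and responses without bodies.
	sess := session.Must(session.NewSession(&aws.Config{
		Region:   aws.String("us-east-1"),
		LogLevel: aws.LogLevel(aws.LogDebugWithHTTPBody),
	}))
	_ = sess // any client built from sess inherits the debug handlers
}
```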
src/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go (generated, vendored; new file, 13 lines)
@@ -0,0 +1,13 @@
package metadata

// ClientInfo wraps immutable data from the client.Client structure.
type ClientInfo struct {
	ServiceName   string
	ServiceID     string
	APIVersion    string
	Endpoint      string
	SigningName   string
	SigningRegion string
	JSONVersion   string
	TargetPrefix  string
}
src/vendor/github.com/aws/aws-sdk-go/aws/config.go (generated, vendored; new file, 536 lines)
@@ -0,0 +1,536 @@
|
||||
package aws
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/endpoints"
|
||||
)
|
||||
|
||||
// UseServiceDefaultRetries instructs the config to use the service's own
|
||||
// default number of retries. This will be the default action if
|
||||
// Config.MaxRetries is nil also.
|
||||
const UseServiceDefaultRetries = -1
|
||||
|
||||
// RequestRetryer is an alias for a type that implements the request.Retryer
|
||||
// interface.
|
||||
type RequestRetryer interface{}
|
||||
|
||||
// A Config provides service configuration for service clients. By default,
|
||||
// all clients will use the defaults.DefaultConfig structure.
|
||||
//
|
||||
// // Create Session with MaxRetry configuration to be shared by multiple
|
||||
// // service clients.
|
||||
// sess := session.Must(session.NewSession(&aws.Config{
|
||||
// MaxRetries: aws.Int(3),
|
||||
// }))
|
||||
//
|
||||
// // Create S3 service client with a specific Region.
|
||||
// svc := s3.New(sess, &aws.Config{
|
||||
// Region: aws.String("us-west-2"),
|
||||
// })
|
||||
type Config struct {
|
||||
// Enables verbose error printing of all credential chain errors.
|
||||
// Should be used when wanting to see all errors while attempting to
|
||||
// retrieve credentials.
|
||||
CredentialsChainVerboseErrors *bool
|
||||
|
||||
// The credentials object to use when signing requests. Defaults to a
|
||||
// chain of credential providers to search for credentials in environment
|
||||
// variables, shared credential file, and EC2 Instance Roles.
|
||||
Credentials *credentials.Credentials
|
||||
|
||||
// An optional endpoint URL (hostname only or fully qualified URI)
|
||||
// that overrides the default generated endpoint for a client. Set this
|
||||
// to `""` to use the default generated endpoint.
|
||||
//
|
||||
// Note: You must still provide a `Region` value when specifying an
|
||||
// endpoint for a client.
|
||||
Endpoint *string
|
||||
|
||||
// The resolver to use for looking up endpoints for AWS service clients
|
||||
// to use based on region.
|
||||
EndpointResolver endpoints.Resolver
|
||||
|
||||
// EnforceShouldRetryCheck is used in the AfterRetryHandler to always call
|
||||
// ShouldRetry regardless of whether or not if request.Retryable is set.
|
||||
// This will utilize ShouldRetry method of custom retryers. If EnforceShouldRetryCheck
|
||||
// is not set, then ShouldRetry will only be called if request.Retryable is nil.
|
||||
// Proper handling of the request.Retryable field is important when setting this field.
|
||||
EnforceShouldRetryCheck *bool
|
||||
|
||||
// The region to send requests to. This parameter is required and must
|
||||
// be configured globally or on a per-client basis unless otherwise
|
||||
// noted. A full list of regions is found in the "Regions and Endpoints"
|
||||
// document.
|
||||
//
|
||||
// See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS
|
||||
// Regions and Endpoints.
|
||||
Region *string
|
||||
|
||||
// Set this to `true` to disable SSL when sending requests. Defaults
|
||||
// to `false`.
|
||||
DisableSSL *bool
|
||||
|
||||
// The HTTP client to use when sending requests. Defaults to
|
||||
// `http.DefaultClient`.
|
||||
HTTPClient *http.Client
|
||||
|
||||
// An integer value representing the logging level. The default log level
|
||||
// is zero (LogOff), which represents no logging. To enable logging set
|
||||
// to a LogLevel Value.
|
||||
LogLevel *LogLevelType
|
||||
|
||||
// The logger writer interface to write logging messages to. Defaults to
|
||||
// standard out.
|
||||
Logger Logger
|
||||
|
||||
// The maximum number of times that a request will be retried for failures.
|
||||
// Defaults to -1, which defers the max retry setting to the service
|
||||
// specific configuration.
|
||||
MaxRetries *int
|
||||
|
||||
// Retryer guides how HTTP requests should be retried in case of
|
||||
// recoverable failures.
|
||||
//
|
||||
// When nil or the value does not implement the request.Retryer interface,
|
||||
// the client.DefaultRetryer will be used.
|
||||
//
|
||||
// When both Retryer and MaxRetries are non-nil, the former is used and
|
||||
// the latter ignored.
|
||||
//
|
||||
// To set the Retryer field in a type-safe manner and with chaining, use
|
||||
// the request.WithRetryer helper function:
|
||||
//
|
||||
// cfg := request.WithRetryer(aws.NewConfig(), myRetryer)
|
||||
//
|
||||
Retryer RequestRetryer
|
||||
|
||||
// Disables semantic parameter validation, which validates input for
|
||||
// missing required fields and/or other semantic request input errors.
|
||||
DisableParamValidation *bool
|
||||
|
||||
// Disables the computation of request and response checksums, e.g.,
|
||||
// CRC32 checksums in Amazon DynamoDB.
|
||||
DisableComputeChecksums *bool
|
||||
|
||||
// Set this to `true` to force the request to use path-style addressing,
|
||||
// i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client
|
||||
// will use virtual hosted bucket addressing when possible
|
||||
// (`http://BUCKET.s3.amazonaws.com/KEY`).
|
||||
//
|
||||
// Note: This configuration option is specific to the Amazon S3 service.
|
||||
//
|
||||
// See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
|
||||
// for Amazon S3: Virtual Hosting of Buckets
|
||||
S3ForcePathStyle *bool
|
||||
|
||||
// Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
|
||||
// header to PUT requests over 2MB of content. 100-Continue instructs the
|
||||
// HTTP client not to send the body until the service responds with a
|
||||
// `continue` status. This is useful to prevent sending the request body
|
||||
// until after the request is authenticated, and validated.
|
||||
//
|
||||
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
|
||||
//
|
||||
// 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
|
||||
// `ExpectContinueTimeout` for information on adjusting the continue wait
|
||||
// timeout. https://golang.org/pkg/net/http/#Transport
|
||||
//
|
||||
// You should use this flag to disble 100-Continue if you experience issues
|
||||
// with proxies or third party S3 compatible services.
|
||||
S3Disable100Continue *bool
|
||||
|
||||
// Set this to `true` to enable S3 Accelerate feature. For all operations
|
||||
// compatible with S3 Accelerate will use the accelerate endpoint for
|
||||
// requests. Requests not compatible will fall back to normal S3 requests.
|
||||
//
|
||||
// The bucket must be enable for accelerate to be used with S3 client with
|
||||
// accelerate enabled. If the bucket is not enabled for accelerate an error
|
||||
// will be returned. The bucket name must be DNS compatible to also work
|
||||
// with accelerate.
|
||||
S3UseAccelerate *bool
|
||||
|
||||
// S3DisableContentMD5Validation config option is temporarily disabled,
|
||||
// For S3 GetObject API calls, #1837.
|
||||
//
|
||||
// Set this to `true` to disable the S3 service client from automatically
|
||||
// adding the ContentMD5 to S3 Object Put and Upload API calls. This option
|
||||
// will also disable the SDK from performing object ContentMD5 validation
|
||||
// on GetObject API calls.
|
||||
S3DisableContentMD5Validation *bool
|
||||
|
||||
// Set this to `true` to disable the EC2Metadata client from overriding the
|
||||
// default http.Client's Timeout. This is helpful if you do not want the
|
||||
// EC2Metadata client to create a new http.Client. This options is only
|
||||
// meaningful if you're not already using a custom HTTP client with the
|
||||
// SDK. Enabled by default.
|
||||
//
|
||||
// Must be set and provided to the session.NewSession() in order to disable
|
||||
// the EC2Metadata overriding the timeout for default credentials chain.
|
||||
//
|
||||
// Example:
|
||||
// sess := session.Must(session.NewSession(aws.NewConfig()
|
||||
// .WithEC2MetadataDiableTimeoutOverride(true)))
|
||||
//
|
||||
// svc := s3.New(sess)
|
||||
//
|
||||
EC2MetadataDisableTimeoutOverride *bool
|
||||
|
||||
// Instructs the endpoint to be generated for a service client to
|
||||
// be the dual stack endpoint. The dual stack endpoint will support
|
||||
// both IPv4 and IPv6 addressing.
|
||||
//
|
||||
// Setting this for a service which does not support dual stack will fail
|
||||
// to make requets. It is not recommended to set this value on the session
|
||||
// as it will apply to all service clients created with the session. Even
|
||||
// services which don't support dual stack endpoints.
|
||||
//
|
||||
// If the Endpoint config value is also provided the UseDualStack flag
|
||||
// will be ignored.
|
||||
//
|
||||
// Only supported with.
|
||||
//
|
||||
// sess := session.Must(session.NewSession())
|
||||
//
|
||||
// svc := s3.New(sess, &aws.Config{
|
||||
// UseDualStack: aws.Bool(true),
|
||||
// })
|
||||
UseDualStack *bool
|
||||
|
||||
// SleepDelay is an override for the func the SDK will call when sleeping
|
||||
// during the lifecycle of a request. Specifically this will be used for
|
||||
// request delays. This value should only be used for testing. To adjust
|
||||
// the delay of a request see the aws/client.DefaultRetryer and
|
||||
// aws/request.Retryer.
|
||||
//
|
||||
// SleepDelay will prevent any Context from being used for canceling retry
|
||||
// delay of an API operation. It is recommended to not use SleepDelay at all
|
||||
// and specify a Retryer instead.
|
||||
SleepDelay func(time.Duration)
|
||||
|
||||
// DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests.
|
||||
// Will default to false. This would only be used for empty directory names in s3 requests.
|
||||
//
|
||||
// Example:
|
||||
// sess := session.Must(session.NewSession(&aws.Config{
|
||||
// DisableRestProtocolURICleaning: aws.Bool(true),
|
||||
// }))
|
||||
//
|
||||
// svc := s3.New(sess)
|
||||
// out, err := svc.GetObject(&s3.GetObjectInput {
|
||||
// Bucket: aws.String("bucketname"),
|
||||
// Key: aws.String("//foo//bar//moo"),
|
||||
// })
|
||||
DisableRestProtocolURICleaning *bool
|
||||
|
||||
// EnableEndpointDiscovery will allow for endpoint discovery on operations that
|
||||
// have the definition in its model. By default, endpoint discovery is off.
|
||||
//
|
||||
// Example:
|
||||
// sess := session.Must(session.NewSession(&aws.Config{
|
||||
// EnableEndpointDiscovery: aws.Bool(true),
|
||||
// }))
|
||||
//
|
||||
// svc := s3.New(sess)
|
||||
// out, err := svc.GetObject(&s3.GetObjectInput {
|
||||
// Bucket: aws.String("bucketname"),
|
||||
// Key: aws.String("/foo/bar/moo"),
|
||||
// })
|
||||
EnableEndpointDiscovery *bool
|
||||
|
||||
// DisableEndpointHostPrefix will disable the SDK's behavior of prefixing
|
||||
// request endpoint hosts with modeled information.
|
||||
//
|
||||
// Disabling this feature is useful when you want to use local endpoints
|
||||
// for testing that do not support the modeled host prefix pattern.
|
||||
DisableEndpointHostPrefix *bool
|
||||
}
|
||||
|
||||
// NewConfig returns a new Config pointer that can be chained with builder
|
||||
// methods to set multiple configuration values inline without using pointers.
|
||||
//
|
||||
// // Create Session with MaxRetry configuration to be shared by multiple
|
||||
// // service clients.
|
||||
// sess := session.Must(session.NewSession(aws.NewConfig().
|
||||
// WithMaxRetries(3),
|
||||
// ))
|
||||
//
|
||||
// // Create S3 service client with a specific Region.
|
||||
// svc := s3.New(sess, aws.NewConfig().
|
||||
// WithRegion("us-west-2"),
|
||||
// )
|
||||
func NewConfig() *Config {
|
||||
return &Config{}
|
||||
}
|
||||
|
||||
// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning
|
||||
// a Config pointer.
|
||||
func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config {
|
||||
c.CredentialsChainVerboseErrors = &verboseErrs
|
||||
return c
|
||||
}
|
||||
|
||||
// WithCredentials sets a config Credentials value returning a Config pointer
|
||||
// for chaining.
|
||||
func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
|
||||
c.Credentials = creds
|
||||
return c
|
||||
}
|
||||
|
||||
// WithEndpoint sets a config Endpoint value returning a Config pointer for
|
||||
// chaining.
|
||||
func (c *Config) WithEndpoint(endpoint string) *Config {
|
||||
c.Endpoint = &endpoint
|
||||
return c
|
||||
}
|
||||
|
||||
// WithEndpointResolver sets a config EndpointResolver value returning a
|
||||
// Config pointer for chaining.
|
||||
func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config {
|
||||
c.EndpointResolver = resolver
|
||||
return c
|
||||
}
|
||||
|
||||
// WithRegion sets a config Region value returning a Config pointer for
|
||||
// chaining.
|
||||
func (c *Config) WithRegion(region string) *Config {
|
||||
c.Region = ®ion
|
||||
return c
|
||||
}
|
||||
|
||||
// WithDisableSSL sets a config DisableSSL value returning a Config pointer
|
||||
// for chaining.
|
||||
func (c *Config) WithDisableSSL(disable bool) *Config {
|
||||
c.DisableSSL = &disable
|
||||
return c
|
||||
}
|
||||
|
||||
// WithHTTPClient sets a config HTTPClient value returning a Config pointer
|
||||
// for chaining.
|
||||
func (c *Config) WithHTTPClient(client *http.Client) *Config {
|
||||
c.HTTPClient = client
|
||||
return c
|
||||
}
|
||||
|
||||
// WithMaxRetries sets a config MaxRetries value returning a Config pointer
|
||||
// for chaining.
|
||||
func (c *Config) WithMaxRetries(max int) *Config {
|
||||
c.MaxRetries = &max
|
||||
return c
|
||||
}
|
||||
|
||||
// WithDisableParamValidation sets a config DisableParamValidation value
|
||||
// returning a Config pointer for chaining.
|
||||
func (c *Config) WithDisableParamValidation(disable bool) *Config {
|
||||
c.DisableParamValidation = &disable
|
||||
return c
|
||||
}
|
||||
|
||||
// WithDisableComputeChecksums sets a config DisableComputeChecksums value
|
||||
// returning a Config pointer for chaining.
|
||||
func (c *Config) WithDisableComputeChecksums(disable bool) *Config {
|
||||
c.DisableComputeChecksums = &disable
|
||||
return c
|
||||
}
|
||||
|
||||
// WithLogLevel sets a config LogLevel value returning a Config pointer for
|
||||
// chaining.
|
||||
func (c *Config) WithLogLevel(level LogLevelType) *Config {
|
||||
c.LogLevel = &level
|
||||
return c
|
||||
}
|
||||
|
||||
// WithLogger sets a config Logger value returning a Config pointer for
|
||||
// chaining.
|
||||
func (c *Config) WithLogger(logger Logger) *Config {
|
||||
c.Logger = logger
|
||||
return c
|
||||
}
|
||||
|
||||
// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config
|
||||
// pointer for chaining.
|
||||
func (c *Config) WithS3ForcePathStyle(force bool) *Config {
|
||||
c.S3ForcePathStyle = &force
|
||||
return c
|
||||
}
|
||||
|
||||
// WithS3Disable100Continue sets a config S3Disable100Continue value returning
|
||||
// a Config pointer for chaining.
|
||||
func (c *Config) WithS3Disable100Continue(disable bool) *Config {
|
||||
c.S3Disable100Continue = &disable
|
||||
return c
|
||||
}
|
||||
|
||||
// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config
|
||||
// pointer for chaining.
|
||||
func (c *Config) WithS3UseAccelerate(enable bool) *Config {
|
||||
c.S3UseAccelerate = &enable
|
||||
return c
|
||||
|
||||
}
|
||||
|
||||
// WithS3DisableContentMD5Validation sets a config
|
||||
// S3DisableContentMD5Validation value returning a Config pointer for chaining.
|
||||
func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config {
|
||||
c.S3DisableContentMD5Validation = &enable
|
||||
return c
|
||||
|
||||
}
|
||||
|
||||
// WithUseDualStack sets a config UseDualStack value returning a Config
|
||||
// pointer for chaining.
|
||||
func (c *Config) WithUseDualStack(enable bool) *Config {
|
||||
c.UseDualStack = &enable
|
||||
return c
|
||||
}
|
||||
|
||||
// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value
|
||||
// returning a Config pointer for chaining.
|
||||
func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
|
||||
c.EC2MetadataDisableTimeoutOverride = &enable
|
||||
return c
|
||||
}
|
||||
|
||||
// WithSleepDelay overrides the function used to sleep while waiting for the
|
||||
// next retry. Defaults to time.Sleep.
|
||||
func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
|
||||
c.SleepDelay = fn
|
||||
return c
|
||||
}
|
||||
|
||||
// WithEndpointDiscovery will set whether or not to use endpoint discovery.
|
||||
func (c *Config) WithEndpointDiscovery(t bool) *Config {
|
||||
c.EnableEndpointDiscovery = &t
|
||||
return c
|
||||
}
|
||||
|
||||
// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix
|
||||
// when making requests.
|
||||
func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config {
|
||||
c.DisableEndpointHostPrefix = &t
|
||||
return c
|
||||
}
|
||||
|
||||
// MergeIn merges the passed in configs into the existing config object.
|
||||
func (c *Config) MergeIn(cfgs ...*Config) {
|
||||
for _, other := range cfgs {
|
||||
mergeInConfig(c, other)
|
||||
}
|
||||
}
|
||||
|
||||
func mergeInConfig(dst *Config, other *Config) {
|
||||
if other == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if other.CredentialsChainVerboseErrors != nil {
|
||||
dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors
|
||||
}
|
||||
|
||||
if other.Credentials != nil {
|
||||
dst.Credentials = other.Credentials
|
||||
}
|
||||
|
||||
if other.Endpoint != nil {
|
||||
dst.Endpoint = other.Endpoint
|
||||
}
|
||||
|
||||
if other.EndpointResolver != nil {
|
||||
dst.EndpointResolver = other.EndpointResolver
|
||||
}
|
||||
|
||||
if other.Region != nil {
|
||||
dst.Region = other.Region
|
||||
}
|
||||
|
||||
if other.DisableSSL != nil {
|
||||
dst.DisableSSL = other.DisableSSL
|
||||
}
|
||||
|
||||
if other.HTTPClient != nil {
|
||||
dst.HTTPClient = other.HTTPClient
|
||||
}
|
||||
|
||||
if other.LogLevel != nil {
|
||||
dst.LogLevel = other.LogLevel
|
||||
}
|
||||
|
||||
if other.Logger != nil {
|
||||
dst.Logger = other.Logger
|
||||
}
|
||||
|
||||
if other.MaxRetries != nil {
|
||||
dst.MaxRetries = other.MaxRetries
|
||||
}
|
||||
|
||||
if other.Retryer != nil {
|
||||
dst.Retryer = other.Retryer
|
||||
}
|
||||
|
||||
if other.DisableParamValidation != nil {
|
||||
dst.DisableParamValidation = other.DisableParamValidation
|
||||
}
|
||||
|
||||
if other.DisableComputeChecksums != nil {
|
||||
dst.DisableComputeChecksums = other.DisableComputeChecksums
|
||||
}
|
||||
|
||||
if other.S3ForcePathStyle != nil {
|
||||
dst.S3ForcePathStyle = other.S3ForcePathStyle
|
||||
}
|
||||
|
||||
if other.S3Disable100Continue != nil {
|
||||
dst.S3Disable100Continue = other.S3Disable100Continue
|
||||
}
|
||||
|
||||
if other.S3UseAccelerate != nil {
|
||||
dst.S3UseAccelerate = other.S3UseAccelerate
|
||||
}
|
||||
|
||||
if other.S3DisableContentMD5Validation != nil {
|
||||
dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation
|
||||
}
|
||||
|
||||
if other.UseDualStack != nil {
|
||||
dst.UseDualStack = other.UseDualStack
|
||||
}
|
||||
|
||||
if other.EC2MetadataDisableTimeoutOverride != nil {
|
||||
dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
|
||||
}
|
||||
|
||||
if other.SleepDelay != nil {
|
||||
dst.SleepDelay = other.SleepDelay
|
||||
}
|
||||
|
||||
if other.DisableRestProtocolURICleaning != nil {
|
||||
dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning
|
||||
}
|
||||
|
||||
if other.EnforceShouldRetryCheck != nil {
|
||||
dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck
|
||||
}
|
||||
|
||||
if other.EnableEndpointDiscovery != nil {
|
||||
dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery
|
||||
}
|
||||
|
||||
if other.DisableEndpointHostPrefix != nil {
|
||||
dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix
|
||||
}
|
||||
}
|
||||
|
||||
// Copy will return a shallow copy of the Config object. If any additional
|
||||
// configurations are provided they will be merged into the new config returned.
|
||||
func (c *Config) Copy(cfgs ...*Config) *Config {
|
||||
dst := &Config{}
|
||||
dst.MergeIn(c)
|
||||
|
||||
for _, cfg := range cfgs {
|
||||
dst.MergeIn(cfg)
|
||||
}
|
||||
|
||||
return dst
|
||||
}
|
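Because every field of `Config` is a pointer, zero values never clobber existing settings when configs are merged or copied. A short sketch of the layered-config pattern the comments above describe (region, retry count, and the path-style flag are illustrative):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Base settings shared by all clients.
	base := aws.NewConfig().
		WithRegion("us-east-1").
		WithMaxRetries(5)

	// Per-client override: Copy merges only non-nil fields, so Region and
	// MaxRetries from base survive while S3ForcePathStyle is added on top.
	s3cfg := base.Copy(aws.NewConfig().WithS3ForcePathStyle(true))

	fmt.Println(*s3cfg.Region, *s3cfg.MaxRetries, *s3cfg.S3ForcePathStyle)
	// us-east-1 5 true
}
```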
src/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go (generated, vendored; new file, 37 lines)
@@ -0,0 +1,37 @@
// +build !go1.9

package aws

import "time"

// Context is an copy of the Go v1.7 stdlib's context.Context interface.
// It is represented as a SDK interface to enable you to use the "WithContext"
// API methods with Go v1.6 and a Context type such as golang.org/x/net/context.
//
// See https://golang.org/pkg/context on how to use contexts.
type Context interface {
	// Deadline returns the time when work done on behalf of this context
	// should be canceled. Deadline returns ok==false when no deadline is
	// set. Successive calls to Deadline return the same results.
	Deadline() (deadline time.Time, ok bool)

	// Done returns a channel that's closed when work done on behalf of this
	// context should be canceled. Done may return nil if this context can
	// never be canceled. Successive calls to Done return the same value.
	Done() <-chan struct{}

	// Err returns a non-nil error value after Done is closed. Err returns
	// Canceled if the context was canceled or DeadlineExceeded if the
	// context's deadline passed. No other values for Err are defined.
	// After Done is closed, successive calls to Err return the same value.
	Err() error

	// Value returns the value associated with this context for key, or nil
	// if no value is associated with key. Successive calls to Value with
	// the same key returns the same result.
	//
	// Use context values only for request-scoped data that transits
	// processes and API boundaries, not for passing optional parameters to
	// functions.
	Value(key interface{}) interface{}
}
src/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go (generated, vendored; new file, 11 lines)
@@ -0,0 +1,11 @@
// +build go1.9

package aws

import "context"

// Context is an alias of the Go stdlib's context.Context interface.
// It can be used within the SDK's API operation "WithContext" methods.
//
// See https://golang.org/pkg/context on how to use contexts.
type Context = context.Context
src/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go (generated, vendored; new file, 56 lines)
@@ -0,0 +1,56 @@
|
||||
// +build !go1.7
|
||||
|
||||
package aws
|
||||
|
||||
import "time"
|
||||
|
||||
// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to
|
||||
// provide a 1.6 and 1.5 safe version of context that is compatible with Go
|
||||
// 1.7's Context.
|
||||
//
|
||||
// An emptyCtx is never canceled, has no values, and has no deadline. It is not
|
||||
// struct{}, since vars of this type must have distinct addresses.
|
||||
type emptyCtx int
|
||||
|
||||
func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
|
||||
return
|
||||
}
|
||||
|
||||
func (*emptyCtx) Done() <-chan struct{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*emptyCtx) Err() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*emptyCtx) Value(key interface{}) interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *emptyCtx) String() string {
|
||||
switch e {
|
||||
case backgroundCtx:
|
||||
return "aws.BackgroundContext"
|
||||
}
|
||||
return "unknown empty Context"
|
||||
}
|
||||
|
||||
var (
|
||||
backgroundCtx = new(emptyCtx)
|
||||
)
|
||||
|
||||
// BackgroundContext returns a context that will never be canceled, has no
|
||||
// values, and no deadline. This context is used by the SDK to provide
|
||||
// backwards compatibility with non-context API operations and functionality.
|
||||
//
|
||||
// Go 1.6 and before:
|
||||
// This context function is equivalent to context.Background in the Go stdlib.
|
||||
//
|
||||
// Go 1.7 and later:
|
||||
// The context returned will be the value returned by context.Background()
|
||||
//
|
||||
// See https://golang.org/pkg/context for more information on Contexts.
|
||||
func BackgroundContext() Context {
|
||||
return backgroundCtx
|
||||
}
|
20 src/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go generated vendored Normal file
@ -0,0 +1,20 @@
// +build go1.7

package aws

import "context"

// BackgroundContext returns a context that will never be canceled, has no
// values, and no deadline. This context is used by the SDK to provide
// backwards compatibility with non-context API operations and functionality.
//
// Go 1.6 and before:
// This context function is equivalent to context.Background in the Go stdlib.
//
// Go 1.7 and later:
// The context returned will be the value returned by context.Background()
//
// See https://golang.org/pkg/context for more information on Contexts.
func BackgroundContext() Context {
	return context.Background()
}
|
24 src/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go generated vendored Normal file
@ -0,0 +1,24 @@
package aws

import (
	"time"
)

// SleepWithContext will wait for the timer duration to expire, or the context
// is canceled, whichever happens first. If the context is canceled the Context's
// error will be returned.
//
// Expects Context to always return a non-nil error if the Done channel is closed.
func SleepWithContext(ctx Context, dur time.Duration) error {
	t := time.NewTimer(dur)
	defer t.Stop()

	select {
	case <-t.C:
		break
	case <-ctx.Done():
		return ctx.Err()
	}

	return nil
}
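SleepWithContext is handy for context-aware waits and backoff. A minimal sketch showing the early return when the context is canceled (the durations are illustrative):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	// Cancel the context well before the full sleep duration elapses.
	go func() {
		time.Sleep(100 * time.Millisecond)
		cancel()
	}()

	// SleepWithContext returns the context's error when the context is
	// canceled; otherwise it returns nil after the duration has passed.
	if err := aws.SleepWithContext(ctx, 5*time.Second); err != nil {
		fmt.Println("sleep interrupted:", err) // context canceled
	}
}
```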
387 src/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go generated vendored Normal file
@ -0,0 +1,387 @@
|
||||
package aws
|
||||
|
||||
import "time"
|
||||
|
||||
// String returns a pointer to the string value passed in.
|
||||
func String(v string) *string {
|
||||
return &v
|
||||
}
|
||||
|
||||
// StringValue returns the value of the string pointer passed in or
|
||||
// "" if the pointer is nil.
|
||||
func StringValue(v *string) string {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// StringSlice converts a slice of string values into a slice of
|
||||
// string pointers
|
||||
func StringSlice(src []string) []*string {
|
||||
dst := make([]*string, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// StringValueSlice converts a slice of string pointers into a slice of
|
||||
// string values
|
||||
func StringValueSlice(src []*string) []string {
|
||||
dst := make([]string, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// StringMap converts a string map of string values into a string
|
||||
// map of string pointers
|
||||
func StringMap(src map[string]string) map[string]*string {
|
||||
dst := make(map[string]*string)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// StringValueMap converts a string map of string pointers into a string
|
||||
// map of string values
|
||||
func StringValueMap(src map[string]*string) map[string]string {
|
||||
dst := make(map[string]string)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Bool returns a pointer to the bool value passed in.
|
||||
func Bool(v bool) *bool {
|
||||
return &v
|
||||
}
|
||||
|
||||
// BoolValue returns the value of the bool pointer passed in or
|
||||
// false if the pointer is nil.
|
||||
func BoolValue(v *bool) bool {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// BoolSlice converts a slice of bool values into a slice of
|
||||
// bool pointers
|
||||
func BoolSlice(src []bool) []*bool {
|
||||
dst := make([]*bool, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// BoolValueSlice converts a slice of bool pointers into a slice of
|
||||
// bool values
|
||||
func BoolValueSlice(src []*bool) []bool {
|
||||
dst := make([]bool, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// BoolMap converts a string map of bool values into a string
|
||||
// map of bool pointers
|
||||
func BoolMap(src map[string]bool) map[string]*bool {
|
||||
dst := make(map[string]*bool)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// BoolValueMap converts a string map of bool pointers into a string
|
||||
// map of bool values
|
||||
func BoolValueMap(src map[string]*bool) map[string]bool {
|
||||
dst := make(map[string]bool)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int returns a pointer to the int value passed in.
|
||||
func Int(v int) *int {
|
||||
return &v
|
||||
}
|
||||
|
||||
// IntValue returns the value of the int pointer passed in or
|
||||
// 0 if the pointer is nil.
|
||||
func IntValue(v *int) int {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// IntSlice converts a slice of int values into a slice of
|
||||
// int pointers
|
||||
func IntSlice(src []int) []*int {
|
||||
dst := make([]*int, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// IntValueSlice converts a slice of int pointers into a slice of
|
||||
// int values
|
||||
func IntValueSlice(src []*int) []int {
|
||||
dst := make([]int, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// IntMap converts a string map of int values into a string
|
||||
// map of int pointers
|
||||
func IntMap(src map[string]int) map[string]*int {
|
||||
dst := make(map[string]*int)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// IntValueMap converts a string map of int pointers into a string
|
||||
// map of int values
|
||||
func IntValueMap(src map[string]*int) map[string]int {
|
||||
dst := make(map[string]int)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int64 returns a pointer to the int64 value passed in.
|
||||
func Int64(v int64) *int64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Int64Value returns the value of the int64 pointer passed in or
|
||||
// 0 if the pointer is nil.
|
||||
func Int64Value(v *int64) int64 {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Int64Slice converts a slice of int64 values into a slice of
|
||||
// int64 pointers
|
||||
func Int64Slice(src []int64) []*int64 {
|
||||
dst := make([]*int64, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int64ValueSlice converts a slice of int64 pointers into a slice of
|
||||
// int64 values
|
||||
func Int64ValueSlice(src []*int64) []int64 {
|
||||
dst := make([]int64, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int64Map converts a string map of int64 values into a string
|
||||
// map of int64 pointers
|
||||
func Int64Map(src map[string]int64) map[string]*int64 {
|
||||
dst := make(map[string]*int64)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Int64ValueMap converts a string map of int64 pointers into a string
|
||||
// map of int64 values
|
||||
func Int64ValueMap(src map[string]*int64) map[string]int64 {
|
||||
dst := make(map[string]int64)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Float64 returns a pointer to the float64 value passed in.
|
||||
func Float64(v float64) *float64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Float64Value returns the value of the float64 pointer passed in or
|
||||
// 0 if the pointer is nil.
|
||||
func Float64Value(v *float64) float64 {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Float64Slice converts a slice of float64 values into a slice of
|
||||
// float64 pointers
|
||||
func Float64Slice(src []float64) []*float64 {
|
||||
dst := make([]*float64, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Float64ValueSlice converts a slice of float64 pointers into a slice of
|
||||
// float64 values
|
||||
func Float64ValueSlice(src []*float64) []float64 {
|
||||
dst := make([]float64, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Float64Map converts a string map of float64 values into a string
|
||||
// map of float64 pointers
|
||||
func Float64Map(src map[string]float64) map[string]*float64 {
|
||||
dst := make(map[string]*float64)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Float64ValueMap converts a string map of float64 pointers into a string
|
||||
// map of float64 values
|
||||
func Float64ValueMap(src map[string]*float64) map[string]float64 {
|
||||
dst := make(map[string]float64)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// Time returns a pointer to the time.Time value passed in.
|
||||
func Time(v time.Time) *time.Time {
|
||||
return &v
|
||||
}
|
||||
|
||||
// TimeValue returns the value of the time.Time pointer passed in or
|
||||
// time.Time{} if the pointer is nil.
|
||||
func TimeValue(v *time.Time) time.Time {
|
||||
if v != nil {
|
||||
return *v
|
||||
}
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
// SecondsTimeValue converts an int64 pointer to a time.Time value
|
||||
// representing seconds since Epoch or time.Time{} if the pointer is nil.
|
||||
func SecondsTimeValue(v *int64) time.Time {
|
||||
if v != nil {
|
||||
return time.Unix((*v / 1000), 0)
|
||||
}
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
// MillisecondsTimeValue converts an int64 pointer to a time.Time value
|
||||
// representing milliseconds since Epoch or time.Time{} if the pointer is nil.
|
||||
func MillisecondsTimeValue(v *int64) time.Time {
|
||||
if v != nil {
|
||||
return time.Unix(0, (*v * 1000000))
|
||||
}
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
|
||||
// The result is undefined if the Unix time cannot be represented by an int64.
|
||||
// This includes calling TimeUnixMilli on a zero Time.
|
||||
//
|
||||
// This utility is useful for service API's such as CloudWatch Logs which require
|
||||
// their unix time values to be in milliseconds.
|
||||
//
|
||||
// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
|
||||
func TimeUnixMilli(t time.Time) int64 {
|
||||
return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
|
||||
}
|
||||
|
||||
// TimeSlice converts a slice of time.Time values into a slice of
|
||||
// time.Time pointers
|
||||
func TimeSlice(src []time.Time) []*time.Time {
|
||||
dst := make([]*time.Time, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
dst[i] = &(src[i])
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// TimeValueSlice converts a slice of time.Time pointers into a slice of
|
||||
// time.Time values
|
||||
func TimeValueSlice(src []*time.Time) []time.Time {
|
||||
dst := make([]time.Time, len(src))
|
||||
for i := 0; i < len(src); i++ {
|
||||
if src[i] != nil {
|
||||
dst[i] = *(src[i])
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// TimeMap converts a string map of time.Time values into a string
|
||||
// map of time.Time pointers
|
||||
func TimeMap(src map[string]time.Time) map[string]*time.Time {
|
||||
dst := make(map[string]*time.Time)
|
||||
for k, val := range src {
|
||||
v := val
|
||||
dst[k] = &v
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// TimeValueMap converts a string map of time.Time pointers into a string
|
||||
// map of time.Time values
|
||||
func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
|
||||
dst := make(map[string]time.Time)
|
||||
for k, val := range src {
|
||||
if val != nil {
|
||||
dst[k] = *val
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
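These pointer/value helpers are used constantly when filling SDK input structs and reading responses. A short sketch of typical usage:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Pointer constructors are used when populating SDK input structs.
	name := aws.String("my-bucket")
	count := aws.Int64(10)

	// The *Value accessors safely dereference, returning the zero value
	// for nil pointers instead of panicking.
	fmt.Println(aws.StringValue(name)) // "my-bucket"
	fmt.Println(aws.Int64Value(count)) // 10
	fmt.Println(aws.StringValue(nil))  // ""
	fmt.Println(aws.BoolValue(nil))    // false

	// Slice converters translate between value and pointer forms.
	ptrs := aws.StringSlice([]string{"a", "b"})
	fmt.Println(aws.StringValueSlice(ptrs)) // [a b]
}
```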
228 src/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go generated vendored Normal file
@ -0,0 +1,228 @@
|
||||
package corehandlers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
)
|
||||
|
||||
// Interface for matching types which also have a Len method.
|
||||
type lener interface {
|
||||
Len() int
|
||||
}
|
||||
|
||||
// BuildContentLengthHandler builds the content length of a request based on the body,
|
||||
// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
|
||||
// to determine request body length and no "Content-Length" was specified it will panic.
|
||||
//
|
||||
// The Content-Length will only be added to the request if the length of the body
|
||||
// is greater than 0. If the body is empty or the current `Content-Length`
|
||||
// header is <= 0, the header will also be stripped.
|
||||
var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) {
|
||||
var length int64
|
||||
|
||||
if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
|
||||
length, _ = strconv.ParseInt(slength, 10, 64)
|
||||
} else {
|
||||
if r.Body != nil {
|
||||
var err error
|
||||
length, err = aws.SeekerLen(r.Body)
|
||||
if err != nil {
|
||||
r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if length > 0 {
|
||||
r.HTTPRequest.ContentLength = length
|
||||
r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
|
||||
} else {
|
||||
r.HTTPRequest.ContentLength = 0
|
||||
r.HTTPRequest.Header.Del("Content-Length")
|
||||
}
|
||||
}}
|
||||
|
||||
var reStatusCode = regexp.MustCompile(`^(\d{3})`)
|
||||
|
||||
// ValidateReqSigHandler is a request handler to ensure that the request's
|
||||
// signature doesn't expire before it is sent. This can happen when a request
|
||||
// is built and signed significantly before it is sent. Or significant delays
|
||||
// occur when retrying requests that would cause the signature to expire.
|
||||
var ValidateReqSigHandler = request.NamedHandler{
|
||||
Name: "core.ValidateReqSigHandler",
|
||||
Fn: func(r *request.Request) {
|
||||
// Unsigned requests are not signed
|
||||
if r.Config.Credentials == credentials.AnonymousCredentials {
|
||||
return
|
||||
}
|
||||
|
||||
signedTime := r.Time
|
||||
if !r.LastSignedAt.IsZero() {
|
||||
signedTime = r.LastSignedAt
|
||||
}
|
||||
|
||||
// 5 minutes to allow for some clock skew/delays in transmission.
|
||||
// Would be improved with aws/aws-sdk-go#423
|
||||
if signedTime.Add(5 * time.Minute).After(time.Now()) {
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Println("request expired, resigning")
|
||||
r.Sign()
|
||||
},
|
||||
}
|
||||
|
||||
// SendHandler is a request handler to send service request using HTTP client.
|
||||
var SendHandler = request.NamedHandler{
|
||||
Name: "core.SendHandler",
|
||||
Fn: func(r *request.Request) {
|
||||
sender := sendFollowRedirects
|
||||
if r.DisableFollowRedirects {
|
||||
sender = sendWithoutFollowRedirects
|
||||
}
|
||||
|
||||
if request.NoBody == r.HTTPRequest.Body {
|
||||
// Strip off the request body if the NoBody reader was used as a
|
||||
// place holder for a request body. This prevents the SDK from
|
||||
// making requests with a request body when it would be invalid
|
||||
// to do so.
|
||||
//
|
||||
// Use a shallow copy of the http.Request to ensure the race condition
|
||||
// of transport on Body will not trigger
|
||||
reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest
|
||||
reqCopy.Body = nil
|
||||
r.HTTPRequest = &reqCopy
|
||||
defer func() {
|
||||
r.HTTPRequest = reqOrig
|
||||
}()
|
||||
}
|
||||
|
||||
var err error
|
||||
r.HTTPResponse, err = sender(r)
|
||||
if err != nil {
|
||||
handleSendError(r, err)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
func sendFollowRedirects(r *request.Request) (*http.Response, error) {
|
||||
return r.Config.HTTPClient.Do(r.HTTPRequest)
|
||||
}
|
||||
|
||||
func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) {
|
||||
transport := r.Config.HTTPClient.Transport
|
||||
if transport == nil {
|
||||
transport = http.DefaultTransport
|
||||
}
|
||||
|
||||
return transport.RoundTrip(r.HTTPRequest)
|
||||
}
|
||||
|
||||
func handleSendError(r *request.Request, err error) {
|
||||
// Prevent leaking if an HTTPResponse was returned. Clean up
|
||||
// the body.
|
||||
if r.HTTPResponse != nil {
|
||||
r.HTTPResponse.Body.Close()
|
||||
}
|
||||
// Capture the case where url.Error is returned for error processing
|
||||
// response. e.g. 301 without location header comes back as string
|
||||
// error and r.HTTPResponse is nil. Other URL redirect errors will
|
||||
// come back in a similar manner.
|
||||
if e, ok := err.(*url.Error); ok && e.Err != nil {
|
||||
if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil {
|
||||
code, _ := strconv.ParseInt(s[1], 10, 64)
|
||||
r.HTTPResponse = &http.Response{
|
||||
StatusCode: int(code),
|
||||
Status: http.StatusText(int(code)),
|
||||
Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
if r.HTTPResponse == nil {
|
||||
// Add a dummy request response object to ensure the HTTPResponse
|
||||
// value is consistent.
|
||||
r.HTTPResponse = &http.Response{
|
||||
StatusCode: int(0),
|
||||
Status: http.StatusText(int(0)),
|
||||
Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
|
||||
}
|
||||
}
|
||||
// Catch all other request errors.
|
||||
r.Error = awserr.New("RequestError", "send request failed", err)
|
||||
r.Retryable = aws.Bool(true) // network errors are retryable
|
||||
|
||||
// Override the error with a context canceled error, if the context was canceled.
|
||||
ctx := r.Context()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
r.Error = awserr.New(request.CanceledErrorCode,
|
||||
"request context canceled", ctx.Err())
|
||||
r.Retryable = aws.Bool(false)
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// ValidateResponseHandler is a request handler to validate service response.
|
||||
var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) {
|
||||
if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
|
||||
// this may be replaced by an UnmarshalError handler
|
||||
r.Error = awserr.New("UnknownError", "unknown error", nil)
|
||||
}
|
||||
}}
|
||||
|
||||
// AfterRetryHandler performs final checks to determine if the request should
|
||||
// be retried and how long to delay.
|
||||
var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) {
|
||||
// If one of the other handlers already set the retry state
|
||||
// we don't want to override it based on the service's state
|
||||
if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) {
|
||||
r.Retryable = aws.Bool(r.ShouldRetry(r))
|
||||
}
|
||||
|
||||
if r.WillRetry() {
|
||||
r.RetryDelay = r.RetryRules(r)
|
||||
|
||||
if sleepFn := r.Config.SleepDelay; sleepFn != nil {
|
||||
// Support SleepDelay for backwards compatibility and testing
|
||||
sleepFn(r.RetryDelay)
|
||||
} else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil {
|
||||
r.Error = awserr.New(request.CanceledErrorCode,
|
||||
"request context canceled", err)
|
||||
r.Retryable = aws.Bool(false)
|
||||
return
|
||||
}
|
||||
|
||||
// when the expired token exception occurs the credentials
|
||||
// need to be expired locally so that the next request to
|
||||
// get credentials will trigger a credentials refresh.
|
||||
if r.IsErrorExpired() {
|
||||
r.Config.Credentials.Expire()
|
||||
}
|
||||
|
||||
r.RetryCount++
|
||||
r.Error = nil
|
||||
}
|
||||
}}
|
||||
|
||||
// ValidateEndpointHandler is a request handler to validate a request had the
|
||||
// appropriate Region and Endpoint set. Will set r.Error if the endpoint or
|
||||
// region is not valid.
|
||||
var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) {
|
||||
if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" {
|
||||
r.Error = aws.ErrMissingRegion
|
||||
} else if r.ClientInfo.Endpoint == "" {
|
||||
r.Error = aws.ErrMissingEndpoint
|
||||
}
|
||||
}}
|
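The core handlers above are plain request.NamedHandler values pushed onto the request lifecycle's handler lists. A sketch of how a caller might register a custom handler in the same way, assuming a default session (the handler name and log line are illustrative):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())

	// Custom handlers use the same request.NamedHandler shape as the core
	// handlers above, and are pushed onto one of the lifecycle phases.
	sess.Handlers.Send.PushFrontNamed(request.NamedHandler{
		Name: "example.LogRequestHandler",
		Fn: func(r *request.Request) {
			fmt.Println("sending", r.ClientInfo.ServiceName, r.Operation.Name)
		},
	})
}
```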
17 src/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go generated vendored Normal file
@ -0,0 +1,17 @@
package corehandlers

import "github.com/aws/aws-sdk-go/aws/request"

// ValidateParametersHandler is a request handler to validate the input parameters.
// Validating parameters only has meaning if done prior to the request being sent.
var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) {
	if !r.ParamsFilled() {
		return
	}

	if v, ok := r.Params.(request.Validator); ok {
		if err := v.Validate(); err != nil {
			r.Error = err
		}
	}
}}
37 src/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go generated vendored Normal file
@ -0,0 +1,37 @@
|
||||
package corehandlers
|
||||
|
||||
import (
|
||||
"os"
|
||||
"runtime"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
)
|
||||
|
||||
// SDKVersionUserAgentHandler is a request handler for adding the SDK Version
|
||||
// to the user agent.
|
||||
var SDKVersionUserAgentHandler = request.NamedHandler{
|
||||
Name: "core.SDKVersionUserAgentHandler",
|
||||
Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
|
||||
runtime.Version(), runtime.GOOS, runtime.GOARCH),
|
||||
}
|
||||
|
||||
const execEnvVar = `AWS_EXECUTION_ENV`
|
||||
const execEnvUAKey = `exec-env`
|
||||
|
||||
// AddHostExecEnvUserAgentHander is a request handler appending the SDK's
|
||||
// execution environment to the user agent.
|
||||
//
|
||||
// If the environment variable AWS_EXECUTION_ENV is set, its value will be
|
||||
// appended to the user agent string.
|
||||
var AddHostExecEnvUserAgentHander = request.NamedHandler{
|
||||
Name: "core.AddHostExecEnvUserAgentHander",
|
||||
Fn: func(r *request.Request) {
|
||||
v := os.Getenv(execEnvVar)
|
||||
if len(v) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
request.AddToUserAgent(r, execEnvUAKey+"/"+v)
|
||||
},
|
||||
}
|
100 src/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go generated vendored Normal file
@ -0,0 +1,100 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrNoValidProvidersFoundInChain Is returned when there are no valid
|
||||
// providers in the ChainProvider.
|
||||
//
|
||||
// This has been deprecated. For verbose error messaging set
|
||||
// aws.Config.CredentialsChainVerboseErrors to true.
|
||||
ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
|
||||
`no valid providers in chain. Deprecated.
|
||||
For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
|
||||
nil)
|
||||
)
|
||||
|
||||
// A ChainProvider will search for a provider which returns credentials
|
||||
// and cache that provider until Retrieve is called again.
|
||||
//
|
||||
// The ChainProvider provides a way of chaining multiple providers together
|
||||
// which will pick the first available using priority order of the Providers
|
||||
// in the list.
|
||||
//
|
||||
// If none of the Providers retrieve valid credentials Value, ChainProvider's
|
||||
// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
|
||||
//
|
||||
// If a Provider is found which returns valid credentials Value ChainProvider
|
||||
// will cache that Provider for all calls to IsExpired(), until Retrieve is
|
||||
// called again.
|
||||
//
|
||||
// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
|
||||
// In this example EnvProvider will first check if any credentials are available
|
||||
// via the environment variables. If there are none ChainProvider will check
|
||||
// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
|
||||
// does not return any credentials ChainProvider will return the error
|
||||
// ErrNoValidProvidersFoundInChain
|
||||
//
|
||||
// creds := credentials.NewChainCredentials(
|
||||
// []credentials.Provider{
|
||||
// &credentials.EnvProvider{},
|
||||
// &ec2rolecreds.EC2RoleProvider{
|
||||
// Client: ec2metadata.New(sess),
|
||||
// },
|
||||
// })
|
||||
//
|
||||
// // Usage of ChainCredentials with aws.Config
|
||||
// svc := ec2.New(session.Must(session.NewSession(&aws.Config{
|
||||
// Credentials: creds,
|
||||
// })))
|
||||
//
|
||||
type ChainProvider struct {
|
||||
Providers []Provider
|
||||
curr Provider
|
||||
VerboseErrors bool
|
||||
}
|
||||
|
||||
// NewChainCredentials returns a pointer to a new Credentials object
|
||||
// wrapping a chain of providers.
|
||||
func NewChainCredentials(providers []Provider) *Credentials {
|
||||
return NewCredentials(&ChainProvider{
|
||||
Providers: append([]Provider{}, providers...),
|
||||
})
|
||||
}
|
||||
|
||||
// Retrieve returns the credentials value or error if no provider returned
|
||||
// without error.
|
||||
//
|
||||
// If a provider is found it will be cached and any calls to IsExpired()
|
||||
// will return the expired state of the cached provider.
|
||||
func (c *ChainProvider) Retrieve() (Value, error) {
|
||||
var errs []error
|
||||
for _, p := range c.Providers {
|
||||
creds, err := p.Retrieve()
|
||||
if err == nil {
|
||||
c.curr = p
|
||||
return creds, nil
|
||||
}
|
||||
errs = append(errs, err)
|
||||
}
|
||||
c.curr = nil
|
||||
|
||||
var err error
|
||||
err = ErrNoValidProvidersFoundInChain
|
||||
if c.VerboseErrors {
|
||||
err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
|
||||
}
|
||||
return Value{}, err
|
||||
}
|
||||
|
||||
// IsExpired will return the expired state of the currently cached provider
|
||||
// if there is one. If there is no current provider, true will be returned.
|
||||
func (c *ChainProvider) IsExpired() bool {
|
||||
if c.curr != nil {
|
||||
return c.curr.IsExpired()
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
292 src/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go generated vendored Normal file
@ -0,0 +1,292 @@
|
||||
// Package credentials provides credential retrieval and management
|
||||
//
|
||||
// The Credentials is the primary method of getting access to and managing
|
||||
// credentials Values. Using dependency injection retrieval of the credential
|
||||
// values is handled by an object which satisfies the Provider interface.
|
||||
//
|
||||
// By default the Credentials.Get() will cache the successful result of a
|
||||
// Provider's Retrieve() until Provider.IsExpired() returns true. At which
|
||||
// point Credentials will call Provider's Retrieve() to get new credential Value.
|
||||
//
|
||||
// The Provider is responsible for determining when credentials Value have expired.
|
||||
// It is also important to note that Credentials will always call Retrieve the
|
||||
// first time Credentials.Get() is called.
|
||||
//
|
||||
// Example of using the environment variable credentials.
|
||||
//
|
||||
// creds := credentials.NewEnvCredentials()
|
||||
//
|
||||
// // Retrieve the credentials value
|
||||
// credValue, err := creds.Get()
|
||||
// if err != nil {
|
||||
// // handle error
|
||||
// }
|
||||
//
|
||||
// Example of forcing credentials to expire and be refreshed on the next Get().
|
||||
// This may be helpful to proactively expire credentials and refresh them sooner
|
||||
// than they would naturally expire on their own.
|
||||
//
|
||||
// creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{})
|
||||
// creds.Expire()
|
||||
// credsValue, err := creds.Get()
|
||||
// // New credentials will be retrieved instead of from cache.
|
||||
//
|
||||
//
|
||||
// Custom Provider
|
||||
//
|
||||
// Each Provider built into this package also provides a helper method to generate
|
||||
// a Credentials pointer setup with the provider. To use a custom Provider just
|
||||
// create a type which satisfies the Provider interface and pass it to the
|
||||
// NewCredentials method.
|
||||
//
|
||||
// type MyProvider struct{}
|
||||
// func (m *MyProvider) Retrieve() (Value, error) {...}
|
||||
// func (m *MyProvider) IsExpired() bool {...}
|
||||
//
|
||||
// creds := credentials.NewCredentials(&MyProvider{})
|
||||
// credValue, err := creds.Get()
|
||||
//
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// AnonymousCredentials is an empty Credential object that can be used as
|
||||
// dummy placeholder credentials for requests that do not need to be signed.
|
||||
//
|
||||
// This Credentials can be used to configure a service to not sign requests
|
||||
// when making service API calls. For example, when accessing public
|
||||
// s3 buckets.
|
||||
//
|
||||
// svc := s3.New(session.Must(session.NewSession(&aws.Config{
|
||||
// Credentials: credentials.AnonymousCredentials,
|
||||
// })))
|
||||
// // Access public S3 buckets.
|
||||
var AnonymousCredentials = NewStaticCredentials("", "", "")
|
||||
|
||||
// A Value is the AWS credentials value for individual credential fields.
|
||||
type Value struct {
|
||||
// AWS Access key ID
|
||||
AccessKeyID string
|
||||
|
||||
// AWS Secret Access Key
|
||||
SecretAccessKey string
|
||||
|
||||
// AWS Session Token
|
||||
SessionToken string
|
||||
|
||||
// Provider used to get credentials
|
||||
ProviderName string
|
||||
}
|
||||
|
||||
// A Provider is the interface for any component which will provide credentials
|
||||
// Value. A provider is required to manage its own Expired state, and what
|
||||
// being expired means.
|
||||
//
|
||||
// The Provider should not need to implement its own mutexes, because
|
||||
// that will be managed by Credentials.
|
||||
type Provider interface {
|
||||
// Retrieve returns nil if it successfully retrieved the value.
|
||||
// An error is returned if the value was not obtainable, or is empty.
|
||||
Retrieve() (Value, error)
|
||||
|
||||
// IsExpired returns if the credentials are no longer valid, and need
|
||||
// to be retrieved.
|
||||
IsExpired() bool
|
||||
}
|
||||
|
||||
// An Expirer is an interface that Providers can implement to expose the expiration
|
||||
// time, if known. If the Provider cannot accurately provide this info,
|
||||
// it should not implement this interface.
|
||||
type Expirer interface {
|
||||
// The time at which the credentials are no longer valid
|
||||
ExpiresAt() time.Time
|
||||
}
|
||||
|
||||
// An ErrorProvider is a stub credentials provider that always returns an error
|
||||
// this is used by the SDK when constructing a known provider is not possible
|
||||
// due to an error.
|
||||
type ErrorProvider struct {
|
||||
// The error to be returned from Retrieve
|
||||
Err error
|
||||
|
||||
// The provider name to set on the Retrieved returned Value
|
||||
ProviderName string
|
||||
}
|
||||
|
||||
// Retrieve will always return the error that the ErrorProvider was created with.
|
||||
func (p ErrorProvider) Retrieve() (Value, error) {
|
||||
return Value{ProviderName: p.ProviderName}, p.Err
|
||||
}
|
||||
|
||||
// IsExpired will always return not expired.
|
||||
func (p ErrorProvider) IsExpired() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// An Expiry provides shared expiration logic to be used by credentials
|
||||
// providers to implement expiry functionality.
|
||||
//
|
||||
// The best method to use this struct is as an anonymous field within the
|
||||
// provider's struct.
|
||||
//
|
||||
// Example:
|
||||
// type EC2RoleProvider struct {
|
||||
// Expiry
|
||||
// ...
|
||||
// }
|
||||
type Expiry struct {
|
||||
// The date/time when to expire on
|
||||
expiration time.Time
|
||||
|
||||
// If set will be used by IsExpired to determine the current time.
|
||||
// Defaults to time.Now if CurrentTime is not set. Available for testing
|
||||
// to be able to mock out the current time.
|
||||
CurrentTime func() time.Time
|
||||
}
|
||||
|
||||
// SetExpiration sets the expiration IsExpired will check when called.
|
||||
//
|
||||
// If window is greater than 0 the expiration time will be reduced by the
|
||||
// window value.
|
||||
//
|
||||
// Using a window is helpful to trigger credentials to expire sooner than
|
||||
// the expiration time given to ensure no requests are made with expired
|
||||
// tokens.
|
||||
func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
|
||||
e.expiration = expiration
|
||||
if window > 0 {
|
||||
e.expiration = e.expiration.Add(-window)
|
||||
}
|
||||
}
|
||||
|
||||
// IsExpired returns if the credentials are expired.
|
||||
func (e *Expiry) IsExpired() bool {
|
||||
curTime := e.CurrentTime
|
||||
if curTime == nil {
|
||||
curTime = time.Now
|
||||
}
|
||||
return e.expiration.Before(curTime())
|
||||
}
|
||||
|
||||
// ExpiresAt returns the expiration time of the credential
|
||||
func (e *Expiry) ExpiresAt() time.Time {
|
||||
return e.expiration
|
||||
}
|
||||
|
||||
// A Credentials provides concurrency safe retrieval of AWS credentials Value.
|
||||
// Credentials will cache the credentials value until they expire. Once the value
|
||||
// expires the next Get will attempt to retrieve valid credentials.
|
||||
//
|
||||
// Credentials is safe to use across multiple goroutines and will manage the
|
||||
// synchronous state so the Providers do not need to implement their own
|
||||
// synchronization.
|
||||
//
|
||||
// The first Credentials.Get() will always call Provider.Retrieve() to get the
|
||||
// first instance of the credentials Value. All calls to Get() after that
|
||||
// will return the cached credentials Value until IsExpired() returns true.
|
||||
type Credentials struct {
|
||||
creds Value
|
||||
forceRefresh bool
|
||||
|
||||
m sync.RWMutex
|
||||
|
||||
provider Provider
|
||||
}
|
||||
|
||||
// NewCredentials returns a pointer to a new Credentials with the provider set.
|
||||
func NewCredentials(provider Provider) *Credentials {
|
||||
return &Credentials{
|
||||
provider: provider,
|
||||
forceRefresh: true,
|
||||
}
|
||||
}
|
||||
|
||||
// Get returns the credentials value, or error if the credentials Value failed
|
||||
// to be retrieved.
|
||||
//
|
||||
// Will return the cached credentials Value if it has not expired. If the
|
||||
// credentials Value has expired the Provider's Retrieve() will be called
|
||||
// to refresh the credentials.
|
||||
//
|
||||
// If Credentials.Expire() was called the credentials Value will be force
|
||||
// expired, and the next call to Get() will cause them to be refreshed.
|
||||
func (c *Credentials) Get() (Value, error) {
|
||||
// Check the cached credentials first with just the read lock.
|
||||
c.m.RLock()
|
||||
if !c.isExpired() {
|
||||
creds := c.creds
|
||||
c.m.RUnlock()
|
||||
return creds, nil
|
||||
}
|
||||
c.m.RUnlock()
|
||||
|
||||
// Credentials are expired need to retrieve the credentials taking the full
|
||||
// lock.
|
||||
c.m.Lock()
|
||||
defer c.m.Unlock()
|
||||
|
||||
if c.isExpired() {
|
||||
creds, err := c.provider.Retrieve()
|
||||
if err != nil {
|
||||
return Value{}, err
|
||||
}
|
||||
c.creds = creds
|
||||
c.forceRefresh = false
|
||||
}
|
||||
|
||||
return c.creds, nil
|
||||
}
|
||||
|
||||
// Expire expires the credentials and forces them to be retrieved on the
|
||||
// next call to Get().
|
||||
//
|
||||
// This will override the Provider's expired state, and force Credentials
|
||||
// to call the Provider's Retrieve().
|
||||
func (c *Credentials) Expire() {
|
||||
c.m.Lock()
|
||||
defer c.m.Unlock()
|
||||
|
||||
c.forceRefresh = true
|
||||
}
|
||||
|
||||
// IsExpired returns if the credentials are no longer valid, and need
|
||||
// to be retrieved.
|
||||
//
|
||||
// If the Credentials were forced to be expired with Expire() this will
|
||||
// reflect that override.
|
||||
func (c *Credentials) IsExpired() bool {
|
||||
c.m.RLock()
|
||||
defer c.m.RUnlock()
|
||||
|
||||
return c.isExpired()
|
||||
}
|
||||
|
||||
// isExpired helper method wrapping the definition of expired credentials.
|
||||
func (c *Credentials) isExpired() bool {
|
||||
return c.forceRefresh || c.provider.IsExpired()
|
||||
}
|
||||
|
||||
// ExpiresAt provides access to the functionality of the Expirer interface of
|
||||
// the underlying Provider, if it supports that interface. Otherwise, it returns
|
||||
// an error.
|
||||
func (c *Credentials) ExpiresAt() (time.Time, error) {
|
||||
c.m.RLock()
|
||||
defer c.m.RUnlock()
|
||||
|
||||
expirer, ok := c.provider.(Expirer)
|
||||
if !ok {
|
||||
return time.Time{}, awserr.New("ProviderNotExpirer",
|
||||
fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.ProviderName),
|
||||
nil)
|
||||
}
|
||||
if c.forceRefresh {
|
||||
// set expiration time to the distant past
|
||||
return time.Time{}, nil
|
||||
}
|
||||
return expirer.ExpiresAt(), nil
|
||||
}
|
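Any type with Retrieve and IsExpired can back a Credentials object, as the package comment above describes. A self-contained sketch of a toy provider (the key values are placeholders, not real credentials):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

// staticExampleProvider is a toy Provider used only for illustration.
type staticExampleProvider struct {
	retrieved bool
}

// Retrieve returns a fixed credentials Value and marks it as retrieved.
func (p *staticExampleProvider) Retrieve() (credentials.Value, error) {
	p.retrieved = true
	return credentials.Value{
		AccessKeyID:     "AKIDEXAMPLE",
		SecretAccessKey: "SECRETEXAMPLE",
		ProviderName:    "staticExampleProvider",
	}, nil
}

// IsExpired reports whether the cached value should be refreshed.
func (p *staticExampleProvider) IsExpired() bool { return !p.retrieved }

func main() {
	creds := credentials.NewCredentials(&staticExampleProvider{})
	v, err := creds.Get() // the first Get always calls Retrieve
	if err != nil {
		panic(err)
	}
	fmt.Println(v.ProviderName, v.AccessKeyID)
}
```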
180 src/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go generated vendored Normal file
@ -0,0 +1,180 @@
|
||||
package ec2rolecreds
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/client"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/ec2metadata"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/internal/sdkuri"
|
||||
)
|
||||
|
||||
// ProviderName provides a name of EC2Role provider
|
||||
const ProviderName = "EC2RoleProvider"
|
||||
|
||||
// An EC2RoleProvider retrieves credentials from the EC2 service, and keeps track of whether
|
||||
// those credentials are expired.
|
||||
//
|
||||
// Example how to configure the EC2RoleProvider with custom http Client, Endpoint
|
||||
// or ExpiryWindow
|
||||
//
|
||||
// p := &ec2rolecreds.EC2RoleProvider{
|
||||
// // Pass in a custom timeout to be used when requesting
|
||||
// // IAM EC2 Role credentials.
|
||||
// Client: ec2metadata.New(sess, aws.Config{
|
||||
// HTTPClient: &http.Client{Timeout: 10 * time.Second},
|
||||
// }),
|
||||
//
|
||||
// // Do not use early expiry of credentials. If a non zero value is
|
||||
// // specified the credentials will be expired early
|
||||
// ExpiryWindow: 0,
|
||||
// }
|
||||
type EC2RoleProvider struct {
|
||||
credentials.Expiry
|
||||
|
||||
// Required EC2Metadata client to use when connecting to EC2 metadata service.
|
||||
Client *ec2metadata.EC2Metadata
|
||||
|
||||
// ExpiryWindow will allow the credentials to trigger refreshing prior to
|
||||
// the credentials actually expiring. This is beneficial so race conditions
|
||||
// with expiring credentials do not cause request to fail unexpectedly
|
||||
// due to ExpiredTokenException exceptions.
|
||||
//
|
||||
// So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
|
||||
// 10 seconds before the credentials are actually expired.
|
||||
//
|
||||
// If ExpiryWindow is 0 or less it will be ignored.
|
||||
ExpiryWindow time.Duration
|
||||
}
|
||||
|
||||
// NewCredentials returns a pointer to a new Credentials object wrapping
|
||||
// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client.
|
||||
// The ConfigProvider is satisfied by the session.Session type.
|
||||
func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials {
|
||||
p := &EC2RoleProvider{
|
||||
Client: ec2metadata.New(c),
|
||||
}
|
||||
|
||||
for _, option := range options {
|
||||
option(p)
|
||||
}
|
||||
|
||||
return credentials.NewCredentials(p)
|
||||
}
|
||||
|
||||
// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping
|
||||
// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2
|
||||
// metadata service.
|
||||
func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials {
|
||||
p := &EC2RoleProvider{
|
||||
Client: client,
|
||||
}
|
||||
|
||||
for _, option := range options {
|
||||
option(p)
|
||||
}
|
||||
|
||||
return credentials.NewCredentials(p)
|
||||
}
|
||||
|
||||
// Retrieve retrieves credentials from the EC2 service.
|
||||
// Error will be returned if the request fails, or unable to extract
|
||||
// the desired credentials.
|
||||
func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
|
||||
credsList, err := requestCredList(m.Client)
|
||||
if err != nil {
|
||||
return credentials.Value{ProviderName: ProviderName}, err
|
||||
}
|
||||
|
||||
if len(credsList) == 0 {
|
||||
return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
|
||||
}
|
||||
credsName := credsList[0]
|
||||
|
||||
roleCreds, err := requestCred(m.Client, credsName)
|
||||
if err != nil {
|
||||
return credentials.Value{ProviderName: ProviderName}, err
|
||||
}
|
||||
|
||||
m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)
|
||||
|
||||
return credentials.Value{
|
||||
AccessKeyID: roleCreds.AccessKeyID,
|
||||
SecretAccessKey: roleCreds.SecretAccessKey,
|
||||
SessionToken: roleCreds.Token,
|
||||
ProviderName: ProviderName,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// A ec2RoleCredRespBody provides the shape for unmarshaling credential
|
||||
// request responses.
|
||||
type ec2RoleCredRespBody struct {
|
||||
// Success State
|
||||
Expiration time.Time
|
||||
AccessKeyID string
|
||||
SecretAccessKey string
|
||||
Token string
|
||||
|
||||
// Error state
|
||||
Code string
|
||||
Message string
|
||||
}
|
||||
|
||||
const iamSecurityCredsPath = "iam/security-credentials/"
|
||||
|
||||
// requestCredList requests a list of credentials from the EC2 service.
|
||||
// If there are no credentials, or there is an error making or receiving the request, an error is returned.
|
||||
func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
|
||||
resp, err := client.GetMetadata(iamSecurityCredsPath)
|
||||
if err != nil {
|
||||
return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
|
||||
}
|
||||
|
||||
credsList := []string{}
|
||||
s := bufio.NewScanner(strings.NewReader(resp))
|
||||
for s.Scan() {
|
||||
credsList = append(credsList, s.Text())
|
||||
}
|
||||
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, awserr.New(request.ErrCodeSerialization,
|
||||
"failed to read EC2 instance role from metadata service", err)
|
||||
}
|
||||
|
||||
return credsList, nil
|
||||
}
|
||||
|
||||
// requestCred requests the credentials for a specific credential name from the EC2 service.
|
||||
//
|
||||
// If the credentials cannot be found, or there is an error reading the response
|
||||
// an error will be returned.
|
||||
func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
|
||||
resp, err := client.GetMetadata(sdkuri.PathJoin(iamSecurityCredsPath, credsName))
|
||||
if err != nil {
|
||||
return ec2RoleCredRespBody{},
|
||||
awserr.New("EC2RoleRequestError",
|
||||
fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName),
|
||||
err)
|
||||
}
|
||||
|
||||
respCreds := ec2RoleCredRespBody{}
|
||||
if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
|
||||
return ec2RoleCredRespBody{},
|
||||
awserr.New(request.ErrCodeSerialization,
|
||||
fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName),
|
||||
err)
|
||||
}
|
||||
|
||||
if respCreds.Code != "Success" {
|
||||
// If an error code was returned something failed requesting the role.
|
||||
return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil)
|
||||
}
|
||||
|
||||
return respCreds, nil
|
||||
}
|
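A sketch of wiring the EC2RoleProvider into a Credentials object with an early-refresh window, assuming a default session (the ten-minute window is illustrative, and Retrieve will only succeed on an EC2 instance with a role attached):

```go
package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())

	// Refresh instance-role credentials ten minutes before they expire.
	creds := ec2rolecreds.NewCredentials(sess, func(p *ec2rolecreds.EC2RoleProvider) {
		p.ExpiryWindow = 10 * time.Minute
	})

	// creds can now be placed into an aws.Config for any service client.
	_ = creds
}
```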
203 src/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go generated vendored Normal file
@ -0,0 +1,203 @@
|
||||
// Package endpointcreds provides support for retrieving credentials from an
|
||||
// arbitrary HTTP endpoint.
|
||||
//
|
||||
// The credentials endpoint Provider can receive both static and refreshable
|
||||
// credentials that will expire. Credentials are static when an "Expiration"
|
||||
// value is not provided in the endpoint's response.
|
||||
//
|
||||
// Static credentials will never expire once they have been retrieved. The format
|
||||
// of the static credentials response:
|
||||
// {
|
||||
// "AccessKeyId" : "MUA...",
|
||||
// "SecretAccessKey" : "/7PC5om....",
|
||||
// }
|
||||
//
|
||||
// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration
|
||||
// value in the response. The format of the refreshable credentials response:
|
||||
// {
|
||||
// "AccessKeyId" : "MUA...",
|
||||
// "SecretAccessKey" : "/7PC5om....",
|
||||
// "Token" : "AQoDY....=",
|
||||
// "Expiration" : "2016-02-25T06:03:31Z"
|
||||
// }
|
||||
//
|
||||
// Errors should be returned in the following format and only returned with 400
|
||||
// or 500 HTTP status codes.
|
||||
// {
|
||||
// "code": "ErrorCode",
|
||||
// "message": "Helpful error message."
|
||||
// }
|
||||
package endpointcreds
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/client"
|
||||
"github.com/aws/aws-sdk-go/aws/client/metadata"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
|
||||
)
|
||||
|
||||
// ProviderName is the name of the credentials provider.
|
||||
const ProviderName = `CredentialsEndpointProvider`
|
||||
|
||||
// Provider satisfies the credentials.Provider interface, and is a client to
|
||||
// retrieve credentials from an arbitrary endpoint.
|
||||
type Provider struct {
|
||||
staticCreds bool
|
||||
credentials.Expiry
|
||||
|
||||
// Requires an AWS Client to make HTTP requests to the endpoint with.
|
||||
// the Endpoint the request will be made to is provided by the aws.Config's
|
||||
// Endpoint value.
|
||||
Client *client.Client
|
||||
|
||||
// ExpiryWindow will allow the credentials to trigger refreshing prior to
|
||||
// the credentials actually expiring. This is beneficial so race conditions
|
||||
// with expiring credentials do not cause request to fail unexpectedly
|
||||
// due to ExpiredTokenException exceptions.
|
||||
//
|
||||
// So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
|
||||
// 10 seconds before the credentials are actually expired.
|
||||
//
|
||||
// If ExpiryWindow is 0 or less it will be ignored.
|
||||
ExpiryWindow time.Duration
|
||||
|
||||
// Optional authorization token value if set will be used as the value of
|
||||
// the Authorization header of the endpoint credential request.
|
||||
AuthorizationToken string
|
||||
}
|
||||
|
||||
// NewProviderClient returns a credentials Provider for retrieving AWS credentials
|
||||
// from arbitrary endpoint.
|
||||
func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider {
|
||||
p := &Provider{
|
||||
Client: client.New(
|
||||
cfg,
|
||||
metadata.ClientInfo{
|
||||
ServiceName: "CredentialsEndpoint",
|
||||
Endpoint: endpoint,
|
||||
},
|
||||
handlers,
|
||||
),
|
||||
}
|
||||
|
||||
p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler)
|
||||
p.Client.Handlers.UnmarshalError.PushBack(unmarshalError)
|
||||
p.Client.Handlers.Validate.Clear()
|
||||
p.Client.Handlers.Validate.PushBack(validateEndpointHandler)
|
||||
|
||||
for _, option := range options {
|
||||
option(p)
|
||||
}
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
// NewCredentialsClient returns a Credentials wrapper for retrieving credentials
|
||||
// from an arbitrary endpoint concurrently. The client will request the
|
||||
func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials {
|
||||
return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...))
|
||||
}
|
||||
|
||||
// IsExpired returns true if the credentials retrieved are expired, or not yet
|
||||
// retrieved.
|
||||
func (p *Provider) IsExpired() bool {
|
||||
if p.staticCreds {
|
||||
return false
|
||||
}
|
||||
return p.Expiry.IsExpired()
|
||||
}
|
||||
|
||||
// Retrieve will attempt to request the credentials from the endpoint the Provider
|
||||
// was configured for. An error will be returned if the retrieval fails.
|
||||
func (p *Provider) Retrieve() (credentials.Value, error) {
|
||||
resp, err := p.getCredentials()
|
||||
if err != nil {
|
||||
return credentials.Value{ProviderName: ProviderName},
|
||||
awserr.New("CredentialsEndpointError", "failed to load credentials", err)
|
||||
}
|
||||
|
||||
if resp.Expiration != nil {
|
||||
p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
|
||||
} else {
|
||||
p.staticCreds = true
|
||||
}
|
||||
|
||||
return credentials.Value{
|
||||
AccessKeyID: resp.AccessKeyID,
|
||||
SecretAccessKey: resp.SecretAccessKey,
|
||||
SessionToken: resp.Token,
|
||||
ProviderName: ProviderName,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type getCredentialsOutput struct {
|
||||
Expiration *time.Time
|
||||
AccessKeyID string
|
||||
SecretAccessKey string
|
||||
Token string
|
||||
}
|
||||
|
||||
type errorOutput struct {
|
||||
Code string `json:"code"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
func (p *Provider) getCredentials() (*getCredentialsOutput, error) {
|
||||
op := &request.Operation{
|
||||
Name: "GetCredentials",
|
||||
HTTPMethod: "GET",
|
||||
}
|
||||
|
||||
out := &getCredentialsOutput{}
|
||||
req := p.Client.NewRequest(op, nil, out)
|
||||
req.HTTPRequest.Header.Set("Accept", "application/json")
|
||||
if authToken := p.AuthorizationToken; len(authToken) != 0 {
|
||||
req.HTTPRequest.Header.Set("Authorization", authToken)
|
||||
}
|
||||
|
||||
return out, req.Send()
|
||||
}
|
||||
|
||||
func validateEndpointHandler(r *request.Request) {
|
||||
if len(r.ClientInfo.Endpoint) == 0 {
|
||||
r.Error = aws.ErrMissingEndpoint
|
||||
}
|
||||
}
|
||||
|
||||
func unmarshalHandler(r *request.Request) {
|
||||
defer r.HTTPResponse.Body.Close()
|
||||
|
||||
out := r.Data.(*getCredentialsOutput)
|
||||
if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil {
|
||||
r.Error = awserr.New(request.ErrCodeSerialization,
|
||||
"failed to decode endpoint credentials",
|
||||
err,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func unmarshalError(r *request.Request) {
|
||||
defer r.HTTPResponse.Body.Close()
|
||||
|
||||
var errOut errorOutput
|
||||
err := jsonutil.UnmarshalJSONError(&errOut, r.HTTPResponse.Body)
|
||||
if err != nil {
|
||||
r.Error = awserr.NewRequestFailure(
|
||||
awserr.New(request.ErrCodeSerialization,
|
||||
"failed to decode error message", err),
|
||||
r.HTTPResponse.StatusCode,
|
||||
r.RequestID,
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
// Response body format is not consistent between metadata endpoints.
|
||||
// Grab the error message as a string and include that as the source error
|
||||
r.Error = awserr.New(errOut.Code, errOut.Message, nil)
|
||||
}
|
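A sketch of constructing the endpoint credentials client described above; the endpoint URL is a placeholder and must serve the JSON document format shown in the package comment:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
	"github.com/aws/aws-sdk-go/aws/defaults"
)

func main() {
	cfg := defaults.Config()
	handlers := defaults.Handlers()

	// The URL below is illustrative only; it must return credentials in
	// the static or refreshable JSON shape documented above.
	creds := endpointcreds.NewCredentialsClient(*cfg, handlers, "http://127.0.0.1:8080/creds")

	v, err := creds.Get()
	if err != nil {
		fmt.Println("retrieval failed:", err)
		return
	}
	fmt.Println("retrieved credentials from", v.ProviderName)
}
```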
74 src/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go generated vendored Normal file
@ -0,0 +1,74 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
)
|
||||
|
||||
// EnvProviderName provides a name of Env provider
|
||||
const EnvProviderName = "EnvProvider"
|
||||
|
||||
var (
|
||||
// ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
|
||||
// found in the process's environment.
|
||||
ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)
|
||||
|
||||
// ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
|
||||
// can't be found in the process's environment.
|
||||
ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
|
||||
)
|
||||
|
||||
// An EnvProvider retrieves credentials from the environment variables of the
|
||||
// running process. Environment credentials never expire.
|
||||
//
|
||||
// Environment variables used:
|
||||
//
|
||||
// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
|
||||
//
|
||||
// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
|
||||
type EnvProvider struct {
|
||||
retrieved bool
|
||||
}
|
||||
|
||||
// NewEnvCredentials returns a pointer to a new Credentials object
|
||||
// wrapping the environment variable provider.
|
||||
func NewEnvCredentials() *Credentials {
|
||||
return NewCredentials(&EnvProvider{})
|
||||
}
|
||||
|
||||
// Retrieve retrieves the keys from the environment.
|
||||
func (e *EnvProvider) Retrieve() (Value, error) {
|
||||
e.retrieved = false
|
||||
|
||||
id := os.Getenv("AWS_ACCESS_KEY_ID")
|
||||
if id == "" {
|
||||
id = os.Getenv("AWS_ACCESS_KEY")
|
||||
}
|
||||
|
||||
secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
|
||||
if secret == "" {
|
||||
secret = os.Getenv("AWS_SECRET_KEY")
|
||||
}
|
||||
|
||||
if id == "" {
|
||||
return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound
|
||||
}
|
||||
|
||||
if secret == "" {
|
||||
return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound
|
||||
}
|
||||
|
||||
e.retrieved = true
|
||||
return Value{
|
||||
AccessKeyID: id,
|
||||
SecretAccessKey: secret,
|
||||
SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
|
||||
ProviderName: EnvProviderName,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// IsExpired returns true if the credentials have not yet been retrieved.
|
||||
func (e *EnvProvider) IsExpired() bool {
|
||||
return !e.retrieved
|
||||
}
|
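The EnvProvider documented above reads only the process environment, so a minimal usage sketch needs nothing beyond the credentials package; the error-handling style here is illustrative rather than prescribed by the SDK:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// NewEnvCredentials wraps EnvProvider, which reads AWS_ACCESS_KEY_ID /
	// AWS_SECRET_ACCESS_KEY (or their legacy aliases) from the environment.
	creds := credentials.NewEnvCredentials()

	v, err := creds.Get()
	if err != nil {
		// ErrAccessKeyIDNotFound / ErrSecretAccessKeyNotFound end up here.
		fmt.Println("no environment credentials:", err)
		return
	}
	fmt.Println("credentials sourced by:", v.ProviderName)
}
```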
12 src/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini generated vendored Normal file
@ -0,0 +1,12 @@
|
||||
[default]
|
||||
aws_access_key_id = accessKey
|
||||
aws_secret_access_key = secret
|
||||
aws_session_token = token
|
||||
|
||||
[no_token]
|
||||
aws_access_key_id = accessKey
|
||||
aws_secret_access_key = secret
|
||||
|
||||
[with_colon]
|
||||
aws_access_key_id: accessKey
|
||||
aws_secret_access_key: secret
|
425 src/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go generated vendored Normal file
@ -0,0 +1,425 @@
|
||||
/*
|
||||
Package processcreds is a credential Provider to retrieve `credential_process`
|
||||
credentials.
|
||||
|
||||
WARNING: The following describes a method of sourcing credentials from an external
|
||||
process. This can potentially be dangerous, so proceed with caution. Other
|
||||
credential providers should be preferred if at all possible. If using this
|
||||
option, you should make sure that the config file is as locked down as possible
|
||||
using security best practices for your operating system.
|
||||
|
||||
You can use credentials from a `credential_process` in a variety of ways.
|
||||
|
||||
One way is to set up your shared config file, located in the default
|
||||
location, with the `credential_process` key and the command you want to be
|
||||
called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable
|
||||
(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file.
|
||||
|
||||
[default]
|
||||
credential_process = /command/to/call
|
||||
|
||||
Creating a new session will use the credential process to retrieve credentials.
|
||||
NOTE: If there are credentials in the profile you are using, the credential
|
||||
process will not be used.
|
||||
|
||||
// Initialize a session to load credentials.
|
||||
sess, _ := session.NewSession(&aws.Config{
|
||||
Region: aws.String("us-east-1")},
|
||||
)
|
||||
|
||||
// Create S3 service client to use the credentials.
|
||||
svc := s3.New(sess)
|
||||
|
||||
Another way to use the `credential_process` method is by using
|
||||
`processcreds.NewCredentials()` and providing a command to be executed to
|
||||
retrieve credentials:
|
||||
|
||||
// Create credentials using the ProcessProvider.
|
||||
creds := processcreds.NewCredentials("/path/to/command")
|
||||
|
||||
// Create service client value configured for credentials.
|
||||
svc := s3.New(sess, &aws.Config{Credentials: creds})
|
||||
|
||||
You can set a non-default timeout for the `credential_process` with another
|
||||
constructor, `processcreds.NewCredentialsTimeout()`, providing the timeout. To
|
||||
set a 500 millisecond timeout:
|
||||
|
||||
// Create credentials using the ProcessProvider.
|
||||
creds := processcreds.NewCredentialsTimeout(
|
||||
"/path/to/command",
|
||||
time.Duration(500) * time.Millisecond)
|
||||
|
||||
If you need more control, you can set any configurable options in the
|
||||
credentials using one or more option functions. For example, you can set a two
|
||||
minute timeout, a credential duration of 60 minutes, and a maximum stdout
|
||||
buffer size of 2k.
|
||||
|
||||
creds := processcreds.NewCredentials(
|
||||
"/path/to/command",
|
||||
func(opt *ProcessProvider) {
|
||||
opt.Timeout = time.Duration(2) * time.Minute
|
||||
opt.Duration = time.Duration(60) * time.Minute
|
||||
opt.MaxBufSize = 2048
|
||||
})
|
||||
|
||||
You can also use your own `exec.Cmd`:
|
||||
|
||||
// Create an exec.Cmd
|
||||
myCommand := exec.Command("/path/to/command")
|
||||
|
||||
// Create credentials using your exec.Cmd and custom timeout
|
||||
creds := processcreds.NewCredentialsCommand(
|
||||
myCommand,
|
||||
func(opt *processcreds.ProcessProvider) {
|
||||
opt.Timeout = time.Duration(1) * time.Second
|
||||
})
|
||||
*/
|
||||
package processcreds
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
)
|
||||
|
||||
const (
|
||||
// ProviderName is the name this credentials provider will label any
|
||||
// returned credentials Value with.
|
||||
ProviderName = `ProcessProvider`
|
||||
|
||||
// ErrCodeProcessProviderParse error parsing process output
|
||||
ErrCodeProcessProviderParse = "ProcessProviderParseError"
|
||||
|
||||
// ErrCodeProcessProviderVersion version error in output
|
||||
ErrCodeProcessProviderVersion = "ProcessProviderVersionError"
|
||||
|
||||
// ErrCodeProcessProviderRequired required attribute missing in output
|
||||
ErrCodeProcessProviderRequired = "ProcessProviderRequiredError"
|
||||
|
||||
// ErrCodeProcessProviderExecution execution of command failed
|
||||
ErrCodeProcessProviderExecution = "ProcessProviderExecutionError"
|
||||
|
||||
// errMsgProcessProviderTimeout process took longer than allowed
|
||||
errMsgProcessProviderTimeout = "credential process timed out"
|
||||
|
||||
// errMsgProcessProviderProcess process error
|
||||
errMsgProcessProviderProcess = "error in credential_process"
|
||||
|
||||
// errMsgProcessProviderParse problem parsing output
|
||||
errMsgProcessProviderParse = "parse failed of credential_process output"
|
||||
|
||||
// errMsgProcessProviderVersion version error in output
|
||||
errMsgProcessProviderVersion = "wrong version in process output (not 1)"
|
||||
|
||||
// errMsgProcessProviderMissKey missing access key id in output
|
||||
errMsgProcessProviderMissKey = "missing AccessKeyId in process output"
|
||||
|
||||
// errMsgProcessProviderMissSecret missing secret access key in output
|
||||
errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output"
|
||||
|
||||
// errMsgProcessProviderPrepareCmd prepare of command failed
|
||||
errMsgProcessProviderPrepareCmd = "failed to prepare command"
|
||||
|
||||
// errMsgProcessProviderEmptyCmd command must not be empty
|
||||
errMsgProcessProviderEmptyCmd = "command must not be empty"
|
||||
|
||||
// errMsgProcessProviderPipe failed to initialize pipe
|
||||
errMsgProcessProviderPipe = "failed to initialize pipe"
|
||||
|
||||
// DefaultDuration is the default amount of time in minutes that the
|
||||
// credentials will be valid for.
|
||||
DefaultDuration = time.Duration(15) * time.Minute
|
||||
|
||||
// DefaultBufSize limits buffer size from growing to an enormous
|
||||
// amount due to a faulty process.
|
||||
DefaultBufSize = 1024
|
||||
|
||||
// DefaultTimeout default limit on time a process can run.
|
||||
DefaultTimeout = time.Duration(1) * time.Minute
|
||||
)
|
||||
|
||||
// ProcessProvider satisfies the credentials.Provider interface, and is a
|
||||
// client to retrieve credentials from a process.
|
||||
type ProcessProvider struct {
|
||||
staticCreds bool
|
||||
credentials.Expiry
|
||||
originalCommand []string
|
||||
|
||||
// Expiry duration of the credentials. Defaults to 15 minutes if not set.
|
||||
Duration time.Duration
|
||||
|
||||
// ExpiryWindow will allow the credentials to trigger refreshing prior to
|
||||
// the credentials actually expiring. This is beneficial so race conditions
|
||||
// with expiring credentials do not cause request to fail unexpectedly
|
||||
// due to ExpiredTokenException exceptions.
|
||||
//
|
||||
// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
|
||||
// 10 seconds before the credentials are actually expired.
|
||||
//
|
||||
// If ExpiryWindow is 0 or less it will be ignored.
|
||||
ExpiryWindow time.Duration
|
||||
|
||||
// A string representing an os command that should return a JSON with
|
||||
// credential information.
|
||||
command *exec.Cmd
|
||||
|
||||
// MaxBufSize limits memory usage from growing to an enormous
|
||||
// amount due to a faulty process.
|
||||
MaxBufSize int
|
||||
|
||||
// Timeout limits the time a process can run.
|
||||
Timeout time.Duration
|
||||
}
|
||||
|
||||
// NewCredentials returns a pointer to a new Credentials object wrapping the
|
||||
// ProcessProvider. The credentials will expire every 15 minutes by default.
|
||||
func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials {
|
||||
p := &ProcessProvider{
|
||||
command: exec.Command(command),
|
||||
Duration: DefaultDuration,
|
||||
Timeout: DefaultTimeout,
|
||||
MaxBufSize: DefaultBufSize,
|
||||
}
|
||||
|
||||
for _, option := range options {
|
||||
option(p)
|
||||
}
|
||||
|
||||
return credentials.NewCredentials(p)
|
||||
}
|
||||
|
||||
// NewCredentialsTimeout returns a pointer to a new Credentials object with
|
||||
// the specified command and timeout, and default duration and max buffer size.
|
||||
func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials {
|
||||
p := NewCredentials(command, func(opt *ProcessProvider) {
|
||||
opt.Timeout = timeout
|
||||
})
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
// NewCredentialsCommand returns a pointer to a new Credentials object with
|
||||
// the specified command, and default timeout, duration and max buffer size.
|
||||
func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials {
|
||||
p := &ProcessProvider{
|
||||
command: command,
|
||||
Duration: DefaultDuration,
|
||||
Timeout: DefaultTimeout,
|
||||
MaxBufSize: DefaultBufSize,
|
||||
}
|
||||
|
||||
for _, option := range options {
|
||||
option(p)
|
||||
}
|
||||
|
||||
return credentials.NewCredentials(p)
|
||||
}
|
||||
|
||||
type credentialProcessResponse struct {
|
||||
Version int
|
||||
AccessKeyID string `json:"AccessKeyId"`
|
||||
SecretAccessKey string
|
||||
SessionToken string
|
||||
Expiration *time.Time
|
||||
}
|
||||
|
||||
// Retrieve executes the 'credential_process' and returns the credentials.
|
||||
func (p *ProcessProvider) Retrieve() (credentials.Value, error) {
|
||||
out, err := p.executeCredentialProcess()
|
||||
if err != nil {
|
||||
return credentials.Value{ProviderName: ProviderName}, err
|
||||
}
|
||||
|
||||
// Serialize and validate response
|
||||
resp := &credentialProcessResponse{}
|
||||
if err = json.Unmarshal(out, resp); err != nil {
|
||||
return credentials.Value{ProviderName: ProviderName}, awserr.New(
|
||||
ErrCodeProcessProviderParse,
|
||||
fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)),
|
||||
err)
|
||||
}
|
||||
|
||||
if resp.Version != 1 {
|
||||
return credentials.Value{ProviderName: ProviderName}, awserr.New(
|
||||
ErrCodeProcessProviderVersion,
|
||||
errMsgProcessProviderVersion,
|
||||
nil)
|
||||
}
|
||||
|
||||
if len(resp.AccessKeyID) == 0 {
|
||||
return credentials.Value{ProviderName: ProviderName}, awserr.New(
|
||||
ErrCodeProcessProviderRequired,
|
||||
errMsgProcessProviderMissKey,
|
||||
nil)
|
||||
}
|
||||
|
||||
if len(resp.SecretAccessKey) == 0 {
|
||||
return credentials.Value{ProviderName: ProviderName}, awserr.New(
|
||||
ErrCodeProcessProviderRequired,
|
||||
errMsgProcessProviderMissSecret,
|
||||
nil)
|
||||
}
|
||||
|
||||
// Handle expiration
|
||||
p.staticCreds = resp.Expiration == nil
|
||||
if resp.Expiration != nil {
|
||||
p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
|
||||
}
|
||||
|
||||
return credentials.Value{
|
||||
ProviderName: ProviderName,
|
||||
AccessKeyID: resp.AccessKeyID,
|
||||
SecretAccessKey: resp.SecretAccessKey,
|
||||
SessionToken: resp.SessionToken,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// IsExpired returns true if the credentials retrieved are expired, or not yet
|
||||
// retrieved.
|
||||
func (p *ProcessProvider) IsExpired() bool {
|
||||
if p.staticCreds {
|
||||
return false
|
||||
}
|
||||
return p.Expiry.IsExpired()
|
||||
}
|
||||
|
||||
// prepareCommand prepares the command to be executed.
|
||||
func (p *ProcessProvider) prepareCommand() error {
|
||||
|
||||
var cmdArgs []string
|
||||
if runtime.GOOS == "windows" {
|
||||
cmdArgs = []string{"cmd.exe", "/C"}
|
||||
} else {
|
||||
cmdArgs = []string{"sh", "-c"}
|
||||
}
|
||||
|
||||
if len(p.originalCommand) == 0 {
|
||||
p.originalCommand = make([]string, len(p.command.Args))
|
||||
copy(p.originalCommand, p.command.Args)
|
||||
|
||||
// check for an empty command, since executing one would otherwise succeed silently
|
||||
if len(strings.TrimSpace(p.originalCommand[0])) < 1 {
|
||||
return awserr.New(
|
||||
ErrCodeProcessProviderExecution,
|
||||
fmt.Sprintf(
|
||||
"%s: %s",
|
||||
errMsgProcessProviderPrepareCmd,
|
||||
errMsgProcessProviderEmptyCmd),
|
||||
nil)
|
||||
}
|
||||
}
|
||||
|
||||
cmdArgs = append(cmdArgs, p.originalCommand...)
|
||||
p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...)
|
||||
p.command.Env = os.Environ()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// executeCredentialProcess starts the credential process on the OS and
|
||||
// returns the results or an error.
|
||||
func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) {
|
||||
|
||||
if err := p.prepareCommand(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Setup the pipes
|
||||
outReadPipe, outWritePipe, err := os.Pipe()
|
||||
if err != nil {
|
||||
return nil, awserr.New(
|
||||
ErrCodeProcessProviderExecution,
|
||||
errMsgProcessProviderPipe,
|
||||
err)
|
||||
}
|
||||
|
||||
p.command.Stderr = os.Stderr // display stderr on console for MFA
|
||||
p.command.Stdout = outWritePipe // get creds json on process's stdout
|
||||
p.command.Stdin = os.Stdin // enable stdin for MFA
|
||||
|
||||
output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize))
|
||||
|
||||
stdoutCh := make(chan error, 1)
|
||||
go readInput(
|
||||
io.LimitReader(outReadPipe, int64(p.MaxBufSize)),
|
||||
output,
|
||||
stdoutCh)
|
||||
|
||||
execCh := make(chan error, 1)
|
||||
go executeCommand(*p.command, execCh)
|
||||
|
||||
finished := false
|
||||
var errors []error
|
||||
for !finished {
|
||||
select {
|
||||
case readError := <-stdoutCh:
|
||||
errors = appendError(errors, readError)
|
||||
finished = true
|
||||
case execError := <-execCh:
|
||||
err := outWritePipe.Close()
|
||||
errors = appendError(errors, err)
|
||||
errors = appendError(errors, execError)
|
||||
if errors != nil {
|
||||
return output.Bytes(), awserr.NewBatchError(
|
||||
ErrCodeProcessProviderExecution,
|
||||
errMsgProcessProviderProcess,
|
||||
errors)
|
||||
}
|
||||
case <-time.After(p.Timeout):
|
||||
finished = true
|
||||
return output.Bytes(), awserr.NewBatchError(
|
||||
ErrCodeProcessProviderExecution,
|
||||
errMsgProcessProviderTimeout,
|
||||
errors) // errors can be nil
|
||||
}
|
||||
}
|
||||
|
||||
out := output.Bytes()
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
// windows adds slashes to quotes
|
||||
out = []byte(strings.Replace(string(out), `\"`, `"`, -1))
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// appendError conveniently checks for nil before appending slice
|
||||
func appendError(errors []error, err error) []error {
|
||||
if err != nil {
|
||||
return append(errors, err)
|
||||
}
|
||||
return errors
|
||||
}
|
||||
|
||||
func executeCommand(cmd exec.Cmd, exec chan error) {
|
||||
// Start the command
|
||||
err := cmd.Start()
|
||||
if err == nil {
|
||||
err = cmd.Wait()
|
||||
}
|
||||
|
||||
exec <- err
|
||||
}
|
||||
|
||||
func readInput(r io.Reader, w io.Writer, read chan error) {
|
||||
tee := io.TeeReader(r, w)
|
||||
|
||||
_, err := ioutil.ReadAll(tee)
|
||||
|
||||
if err == io.EOF {
|
||||
err = nil
|
||||
}
|
||||
|
||||
read <- err // will only arrive here when write end of pipe is closed
|
||||
}
|
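Complementing the package documentation above, a brief sketch of wiring a ProcessProvider through NewCredentialsTimeout; the command path is a hypothetical placeholder and must emit the documented credential_process JSON (Version 1, AccessKeyId, SecretAccessKey, optionally SessionToken and Expiration):

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials/processcreds"
)

func main() {
	// Hypothetical helper script; replace with a real credential_process command.
	creds := processcreds.NewCredentialsTimeout("/usr/local/bin/get-aws-creds", 30*time.Second)

	v, err := creds.Get()
	if err != nil {
		fmt.Println("credential process failed:", err)
		return
	}
	fmt.Println("credentials sourced by:", v.ProviderName)
}
```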
150 src/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go generated vendored Normal file
@ -0,0 +1,150 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/internal/ini"
|
||||
"github.com/aws/aws-sdk-go/internal/shareddefaults"
|
||||
)
|
||||
|
||||
// SharedCredsProviderName provides a name of SharedCreds provider
|
||||
const SharedCredsProviderName = "SharedCredentialsProvider"
|
||||
|
||||
var (
|
||||
// ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found.
|
||||
ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
|
||||
)
|
||||
|
||||
// A SharedCredentialsProvider retrieves credentials from the current user's home
|
||||
// directory, and keeps track if those credentials are expired.
|
||||
//
|
||||
// Profile ini file example: $HOME/.aws/credentials
|
||||
type SharedCredentialsProvider struct {
|
||||
// Path to the shared credentials file.
|
||||
//
|
||||
// If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
|
||||
// env value is empty will default to current user's home directory.
|
||||
// Linux/OSX: "$HOME/.aws/credentials"
|
||||
// Windows: "%USERPROFILE%\.aws\credentials"
|
||||
Filename string
|
||||
|
||||
// AWS Profile to extract credentials from the shared credentials file. If empty
|
||||
// will default to environment variable "AWS_PROFILE" or "default" if
|
||||
// environment variable is also not set.
|
||||
Profile string
|
||||
|
||||
// retrieved states if the credentials have been successfully retrieved.
|
||||
retrieved bool
|
||||
}
|
||||
|
||||
// NewSharedCredentials returns a pointer to a new Credentials object
|
||||
// wrapping the Profile file provider.
|
||||
func NewSharedCredentials(filename, profile string) *Credentials {
|
||||
return NewCredentials(&SharedCredentialsProvider{
|
||||
Filename: filename,
|
||||
Profile: profile,
|
||||
})
|
||||
}
|
||||
|
||||
// Retrieve reads and extracts the shared credentials from the current
|
||||
// users home directory.
|
||||
func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
|
||||
p.retrieved = false
|
||||
|
||||
filename, err := p.filename()
|
||||
if err != nil {
|
||||
return Value{ProviderName: SharedCredsProviderName}, err
|
||||
}
|
||||
|
||||
creds, err := loadProfile(filename, p.profile())
|
||||
if err != nil {
|
||||
return Value{ProviderName: SharedCredsProviderName}, err
|
||||
}
|
||||
|
||||
p.retrieved = true
|
||||
return creds, nil
|
||||
}
|
||||
|
||||
// IsExpired returns if the shared credentials have expired.
|
||||
func (p *SharedCredentialsProvider) IsExpired() bool {
|
||||
return !p.retrieved
|
||||
}
|
||||
|
||||
// loadProfile loads from the file pointed to by the shared credentials filename for the given profile.
|
||||
// The credentials retrieved from the profile will be returned or error. Error will be
|
||||
// returned if it fails to read from the file, or the data is invalid.
|
||||
func loadProfile(filename, profile string) (Value, error) {
|
||||
config, err := ini.OpenFile(filename)
|
||||
if err != nil {
|
||||
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
|
||||
}
|
||||
|
||||
iniProfile, ok := config.GetSection(profile)
|
||||
if !ok {
|
||||
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", nil)
|
||||
}
|
||||
|
||||
id := iniProfile.String("aws_access_key_id")
|
||||
if len(id) == 0 {
|
||||
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
|
||||
fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
|
||||
nil)
|
||||
}
|
||||
|
||||
secret := iniProfile.String("aws_secret_access_key")
|
||||
if len(secret) == 0 {
|
||||
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
|
||||
fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
|
||||
nil)
|
||||
}
|
||||
|
||||
// Default to empty string if not found
|
||||
token := iniProfile.String("aws_session_token")
|
||||
|
||||
return Value{
|
||||
AccessKeyID: id,
|
||||
SecretAccessKey: secret,
|
||||
SessionToken: token,
|
||||
ProviderName: SharedCredsProviderName,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// filename returns the filename to use to read AWS shared credentials.
|
||||
//
|
||||
// Will return an error if the user's home directory path cannot be found.
|
||||
func (p *SharedCredentialsProvider) filename() (string, error) {
|
||||
if len(p.Filename) != 0 {
|
||||
return p.Filename, nil
|
||||
}
|
||||
|
||||
if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 {
|
||||
return p.Filename, nil
|
||||
}
|
||||
|
||||
if home := shareddefaults.UserHomeDir(); len(home) == 0 {
|
||||
// Backwards compatibility of the home directory not found error being returned.
|
||||
// This error is too verbose; failure when opening the file would have been
|
||||
// a better error to return.
|
||||
return "", ErrSharedCredentialsHomeNotFound
|
||||
}
|
||||
|
||||
p.Filename = shareddefaults.SharedCredentialsFilename()
|
||||
|
||||
return p.Filename, nil
|
||||
}
|
||||
|
||||
// profile returns the AWS shared credentials profile. If empty will read
|
||||
// environment variable "AWS_PROFILE". If that is not set profile will
|
||||
// return "default".
|
||||
func (p *SharedCredentialsProvider) profile() string {
|
||||
if p.Profile == "" {
|
||||
p.Profile = os.Getenv("AWS_PROFILE")
|
||||
}
|
||||
if p.Profile == "" {
|
||||
p.Profile = "default"
|
||||
}
|
||||
|
||||
return p.Profile
|
||||
}
|
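As a rough usage sketch of the provider above: empty filename and profile arguments fall back to AWS_SHARED_CREDENTIALS_FILE / $HOME/.aws/credentials and AWS_PROFILE / "default" as described, while passing "no_token" would select the token-less profile from the example.ini shown earlier:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// Empty arguments mean: default credentials file, default profile.
	creds := credentials.NewSharedCredentials("", "")

	v, err := creds.Get()
	if err != nil {
		fmt.Println("failed to load shared credentials:", err)
		return
	}
	fmt.Println("access key id:", v.AccessKeyID)
}
```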
55 src/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go generated vendored Normal file
@ -0,0 +1,55 @@
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
)
|
||||
|
||||
// StaticProviderName provides a name of Static provider
|
||||
const StaticProviderName = "StaticProvider"
|
||||
|
||||
var (
|
||||
// ErrStaticCredentialsEmpty is emitted when static credentials are empty.
|
||||
ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
|
||||
)
|
||||
|
||||
// A StaticProvider is a set of credentials which are set programmatically,
|
||||
// and will never expire.
|
||||
type StaticProvider struct {
|
||||
Value
|
||||
}
|
||||
|
||||
// NewStaticCredentials returns a pointer to a new Credentials object
|
||||
// wrapping a static credentials value provider.
|
||||
func NewStaticCredentials(id, secret, token string) *Credentials {
|
||||
return NewCredentials(&StaticProvider{Value: Value{
|
||||
AccessKeyID: id,
|
||||
SecretAccessKey: secret,
|
||||
SessionToken: token,
|
||||
}})
|
||||
}
|
||||
|
||||
// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object
|
||||
// wrapping the static credentials value provider. Same as NewStaticCredentials
|
||||
// but takes the creds Value instead of individual fields
|
||||
func NewStaticCredentialsFromCreds(creds Value) *Credentials {
|
||||
return NewCredentials(&StaticProvider{Value: creds})
|
||||
}
|
||||
|
||||
// Retrieve returns the credentials or error if the credentials are invalid.
|
||||
func (s *StaticProvider) Retrieve() (Value, error) {
|
||||
if s.AccessKeyID == "" || s.SecretAccessKey == "" {
|
||||
return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
|
||||
}
|
||||
|
||||
if len(s.Value.ProviderName) == 0 {
|
||||
s.Value.ProviderName = StaticProviderName
|
||||
}
|
||||
return s.Value, nil
|
||||
}
|
||||
|
||||
// IsExpired returns if the credentials are expired.
|
||||
//
|
||||
// For StaticProvider, the credentials never expire.
|
||||
func (s *StaticProvider) IsExpired() bool {
|
||||
return false
|
||||
}
|
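A minimal sketch of the static provider above; the key material is placeholder data, and the final line simply demonstrates that static credentials never report as expired:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// Placeholder key material for illustration only.
	creds := credentials.NewStaticCredentials("AKID", "SECRET", "TOKEN")

	v, err := creds.Get()
	if err != nil {
		fmt.Println("static credentials rejected:", err)
		return
	}
	fmt.Println(v.ProviderName, "expired:", creds.IsExpired()) // always false
}
```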
313 src/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go generated vendored Normal file
@ -0,0 +1,313 @@
|
||||
/*
|
||||
Package stscreds are credential Providers to retrieve STS AWS credentials.
|
||||
|
||||
STS provides multiple ways to retrieve credentials which can be used when making
|
||||
future AWS service API operation calls.
|
||||
|
||||
The SDK will ensure that per instance of credentials.Credentials all requests
|
||||
to refresh the credentials will be synchronized. But, the SDK is unable to
|
||||
ensure synchronous usage of the AssumeRoleProvider if the value is shared
|
||||
between multiple Credentials, Sessions or service clients.
|
||||
|
||||
Assume Role
|
||||
|
||||
To assume an IAM role using STS with the SDK you can create a new Credentials
|
||||
with the SDK's stscreds package.
|
||||
|
||||
// Initial credentials loaded from SDK's default credential chain. Such as
|
||||
// the environment, shared credentials (~/.aws/credentials), or EC2 Instance
|
||||
// Role. These credentials will be used to make the STS Assume Role API call.
|
||||
sess := session.Must(session.NewSession())
|
||||
|
||||
// Create the credentials from AssumeRoleProvider to assume the role
|
||||
// referenced by the "myRoleARN" ARN.
|
||||
creds := stscreds.NewCredentials(sess, "myRoleArn")
|
||||
|
||||
// Create service client value configured for credentials
|
||||
// from assumed role.
|
||||
svc := s3.New(sess, &aws.Config{Credentials: creds})
|
||||
|
||||
Assume Role with static MFA Token
|
||||
|
||||
To assume an IAM role with a MFA token you can either specify a MFA token code
|
||||
directly or provide a function to prompt the user each time the credentials
|
||||
need to refresh the role's credentials. Specifying the TokenCode should be used
|
||||
for short lived operations that will not need to be refreshed, and when you do
|
||||
not want to have direct control over how the user provides their MFA token.
|
||||
|
||||
With TokenCode the AssumeRoleProvider will not be able to refresh the role's
|
||||
credentials.
|
||||
|
||||
// Create the credentials from AssumeRoleProvider to assume the role
|
||||
// referenced by the "myRoleARN" ARN using the MFA token code provided.
|
||||
creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
|
||||
p.SerialNumber = aws.String("myTokenSerialNumber")
|
||||
p.TokenCode = aws.String("00000000")
|
||||
})
|
||||
|
||||
// Create service client value configured for credentials
|
||||
// from assumed role.
|
||||
svc := s3.New(sess, &aws.Config{Credentials: creds})
|
||||
|
||||
Assume Role with MFA Token Provider
|
||||
|
||||
To assume an IAM role with MFA for longer running tasks where the credentials
|
||||
may need to be refreshed setting the TokenProvider field of AssumeRoleProvider
|
||||
will allow the credential provider to prompt for new MFA token code when the
|
||||
role's credentials need to be refreshed.
|
||||
|
||||
The StdinTokenProvider function is available to prompt on stdin to retrieve
|
||||
the MFA token code from the user. You can also implement custom prompts by
|
||||
satisfying the TokenProvider function signature.
|
||||
|
||||
Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
|
||||
have undesirable results as the StdinTokenProvider will not be synchronized. A
|
||||
single Credentials with an AssumeRoleProvider can be shared safely.
|
||||
|
||||
// Create the credentials from AssumeRoleProvider to assume the role
|
||||
// referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin.
|
||||
creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
|
||||
p.SerialNumber = aws.String("myTokenSerialNumber")
|
||||
p.TokenProvider = stscreds.StdinTokenProvider
|
||||
})
|
||||
|
||||
// Create service client value configured for credentials
|
||||
// from assumed role.
|
||||
svc := s3.New(sess, &aws.Config{Credentials: creds})
|
||||
|
||||
*/
|
||||
package stscreds
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/client"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/internal/sdkrand"
|
||||
"github.com/aws/aws-sdk-go/service/sts"
|
||||
)
|
||||
|
||||
// StdinTokenProvider will prompt on stderr and read from stdin for a string value.
|
||||
// An error is returned if reading from stdin fails.
|
||||
//
|
||||
// Use this function to read MFA tokens from stdin. The function makes no attempt
|
||||
// to make atomic prompts from stdin across multiple goroutines.
|
||||
//
|
||||
// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
|
||||
// have undesirable results as the StdinTokenProvider will not be synchronized. A
|
||||
// single Credentials with an AssumeRoleProvider can be shared safely
|
||||
//
|
||||
// Will wait forever until something is provided on the stdin.
|
||||
func StdinTokenProvider() (string, error) {
|
||||
var v string
|
||||
fmt.Fprintf(os.Stderr, "Assume Role MFA token code: ")
|
||||
_, err := fmt.Scanln(&v)
|
||||
|
||||
return v, err
|
||||
}
|
||||
|
||||
// ProviderName provides a name of AssumeRole provider
|
||||
const ProviderName = "AssumeRoleProvider"
|
||||
|
||||
// AssumeRoler represents the minimal subset of the STS client API used by this provider.
|
||||
type AssumeRoler interface {
|
||||
AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
|
||||
}
|
||||
|
||||
// DefaultDuration is the default amount of time in minutes that the credentials
|
||||
// will be valid for.
|
||||
var DefaultDuration = time.Duration(15) * time.Minute
|
||||
|
||||
// AssumeRoleProvider retrieves temporary credentials from the STS service, and
|
||||
// keeps track of their expiration time.
|
||||
//
|
||||
// This credential provider will be used by the SDK's default credential chain
|
||||
// when shared configuration is enabled, and the shared config or shared credentials
|
||||
// file configure assume role. See Session docs for how to do this.
|
||||
//
|
||||
// AssumeRoleProvider does not provide any synchronization and it is not safe
|
||||
// to share this value across multiple Credentials, Sessions, or service clients
|
||||
// without also sharing the same Credentials instance.
|
||||
type AssumeRoleProvider struct {
|
||||
credentials.Expiry
|
||||
|
||||
// STS client to make assume role request with.
|
||||
Client AssumeRoler
|
||||
|
||||
// Role to be assumed.
|
||||
RoleARN string
|
||||
|
||||
// Session name, if you wish to reuse the credentials elsewhere.
|
||||
RoleSessionName string
|
||||
|
||||
// Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
|
||||
Duration time.Duration
|
||||
|
||||
// Optional ExternalID to pass along, defaults to nil if not set.
|
||||
ExternalID *string
|
||||
|
||||
// The policy plain text must be 2048 bytes or shorter. However, an internal
|
||||
// conversion compresses it into a packed binary format with a separate limit.
|
||||
// The PackedPolicySize response element indicates by percentage how close to
|
||||
// the upper size limit the policy is, with 100% equaling the maximum allowed
|
||||
// size.
|
||||
Policy *string
|
||||
|
||||
// The identification number of the MFA device that is associated with the user
|
||||
// who is making the AssumeRole call. Specify this value if the trust policy
|
||||
// of the role being assumed includes a condition that requires MFA authentication.
|
||||
// The value is either the serial number for a hardware device (such as GAHT12345678)
|
||||
// or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
|
||||
SerialNumber *string
|
||||
|
||||
// The value provided by the MFA device, if the trust policy of the role being
|
||||
// assumed requires MFA (that is, if the policy includes a condition that tests
|
||||
// for MFA). If the role being assumed requires MFA and if the TokenCode value
|
||||
// is missing or expired, the AssumeRole call returns an "access denied" error.
|
||||
//
|
||||
// If SerialNumber is set and neither TokenCode nor TokenProvider are also
|
||||
// set an error will be returned.
|
||||
TokenCode *string
|
||||
|
||||
// Async method of providing MFA token code for assuming an IAM role with MFA.
|
||||
// The value returned by the function will be used as the TokenCode in the Retrieve
|
||||
// call. See StdinTokenProvider for a provider that prompts and reads from stdin.
|
||||
//
|
||||
// This token provider will be called when ever the assumed role's
|
||||
// credentials need to be refreshed when SerialNumber is also set and
|
||||
// TokenCode is not set.
|
||||
//
|
||||
// If both TokenCode and TokenProvider is set, TokenProvider will be used and
|
||||
// TokenCode is ignored.
|
||||
TokenProvider func() (string, error)
|
||||
|
||||
// ExpiryWindow will allow the credentials to trigger refreshing prior to
|
||||
// the credentials actually expiring. This is beneficial so race conditions
|
||||
// with expiring credentials do not cause request to fail unexpectedly
|
||||
// due to ExpiredTokenException exceptions.
|
||||
//
|
||||
// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
|
||||
// 10 seconds before the credentials are actually expired.
|
||||
//
|
||||
// If ExpiryWindow is 0 or less it will be ignored.
|
||||
ExpiryWindow time.Duration
|
||||
|
||||
// MaxJitterFrac reduces the effective Duration of each credential requested
|
||||
// by a random percentage between 0 and MaxJitterFrac. MaxJitterFrac must
|
||||
// have a value between 0 and 1. Any other value may lead to unexpected behavior.
|
||||
// With the default MaxJitterFrac value of 0, no jitter will be used.
|
||||
//
|
||||
// For example, with a Duration of 30m and a MaxJitterFrac of 0.1, the
|
||||
// AssumeRole call will be made with an arbitrary Duration between 27m and
|
||||
// 30m.
|
||||
//
|
||||
// MaxJitterFrac should not be negative.
|
||||
MaxJitterFrac float64
|
||||
}
|
||||
|
||||
// NewCredentials returns a pointer to a new Credentials object wrapping the
|
||||
// AssumeRoleProvider. The credentials will expire every 15 minutes and the
|
||||
// role will be named after a nanosecond timestamp of this operation.
|
||||
//
|
||||
// Takes a Config provider to create the STS client. The ConfigProvider is
|
||||
// satisfied by the session.Session type.
|
||||
//
|
||||
// It is safe to share the returned Credentials with multiple Sessions and
|
||||
// service clients. All access to the credentials and refreshing them
|
||||
// will be synchronized.
|
||||
func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
|
||||
p := &AssumeRoleProvider{
|
||||
Client: sts.New(c),
|
||||
RoleARN: roleARN,
|
||||
Duration: DefaultDuration,
|
||||
}
|
||||
|
||||
for _, option := range options {
|
||||
option(p)
|
||||
}
|
||||
|
||||
return credentials.NewCredentials(p)
|
||||
}
|
||||
|
||||
// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the
|
||||
// AssumeRoleProvider. The credentials will expire every 15 minutes and the
|
||||
// role will be named after a nanosecond timestamp of this operation.
|
||||
//
|
||||
// Takes an AssumeRoler which can be satisfied by the STS client.
|
||||
//
|
||||
// It is safe to share the returned Credentials with multiple Sessions and
|
||||
// service clients. All access to the credentials and refreshing them
|
||||
// will be synchronized.
|
||||
func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
|
||||
p := &AssumeRoleProvider{
|
||||
Client: svc,
|
||||
RoleARN: roleARN,
|
||||
Duration: DefaultDuration,
|
||||
}
|
||||
|
||||
for _, option := range options {
|
||||
option(p)
|
||||
}
|
||||
|
||||
return credentials.NewCredentials(p)
|
||||
}
|
||||
|
||||
// Retrieve generates a new set of temporary credentials using STS.
|
||||
func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
|
||||
|
||||
// Apply defaults where parameters are not set.
|
||||
if p.RoleSessionName == "" {
|
||||
// Try to work out a role name that will hopefully end up unique.
|
||||
p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano())
|
||||
}
|
||||
if p.Duration == 0 {
|
||||
// Expire as often as AWS permits.
|
||||
p.Duration = DefaultDuration
|
||||
}
|
||||
jitter := time.Duration(sdkrand.SeededRand.Float64() * p.MaxJitterFrac * float64(p.Duration))
|
||||
input := &sts.AssumeRoleInput{
|
||||
DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)),
|
||||
RoleArn: aws.String(p.RoleARN),
|
||||
RoleSessionName: aws.String(p.RoleSessionName),
|
||||
ExternalId: p.ExternalID,
|
||||
}
|
||||
if p.Policy != nil {
|
||||
input.Policy = p.Policy
|
||||
}
|
||||
if p.SerialNumber != nil {
|
||||
if p.TokenCode != nil {
|
||||
input.SerialNumber = p.SerialNumber
|
||||
input.TokenCode = p.TokenCode
|
||||
} else if p.TokenProvider != nil {
|
||||
input.SerialNumber = p.SerialNumber
|
||||
code, err := p.TokenProvider()
|
||||
if err != nil {
|
||||
return credentials.Value{ProviderName: ProviderName}, err
|
||||
}
|
||||
input.TokenCode = aws.String(code)
|
||||
} else {
|
||||
return credentials.Value{ProviderName: ProviderName},
|
||||
awserr.New("AssumeRoleTokenNotAvailable",
|
||||
"assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil)
|
||||
}
|
||||
}
|
||||
|
||||
roleOutput, err := p.Client.AssumeRole(input)
|
||||
if err != nil {
|
||||
return credentials.Value{ProviderName: ProviderName}, err
|
||||
}
|
||||
|
||||
// We will proactively generate new credentials before they expire.
|
||||
p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)
|
||||
|
||||
return credentials.Value{
|
||||
AccessKeyID: *roleOutput.Credentials.AccessKeyId,
|
||||
SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
|
||||
SessionToken: *roleOutput.Credentials.SessionToken,
|
||||
ProviderName: ProviderName,
|
||||
}, nil
|
||||
}
|
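Beyond the MFA-centric examples in the package documentation above, a hedged sketch of tuning the provider through an option function; the role ARN, duration and expiry window are arbitrary illustrative values:

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())

	// Placeholder role ARN; Duration and ExpiryWindow are the option-function
	// knobs described on the AssumeRoleProvider struct above.
	creds := stscreds.NewCredentials(sess, "arn:aws:iam::123456789012:role/example",
		func(p *stscreds.AssumeRoleProvider) {
			p.Duration = 30 * time.Minute
			p.ExpiryWindow = time.Minute
		})

	if _, err := creds.Get(); err != nil {
		fmt.Println("assume role failed:", err)
	}
}
```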
46 src/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go generated vendored Normal file
@ -0,0 +1,46 @@
|
||||
// Package csm provides Client Side Monitoring (CSM) which enables sending metrics
|
||||
// via UDP connection. Using the Start function will enable the reporting of
|
||||
// metrics on a given port. If Start is called again with different parameters,
|
||||
// a panic will occur.
|
||||
//
|
||||
// Pause can be called to pause any metrics publishing on a given port. Sessions
|
||||
// that have had their handlers modified via InjectHandlers may still be used.
|
||||
// However, the handlers will act as a no-op meaning no metrics will be published.
|
||||
//
|
||||
// Example:
|
||||
// r, err := csm.Start("clientID", ":31000")
|
||||
// if err != nil {
|
||||
// panic(fmt.Errorf("failed starting CSM: %v", err))
|
||||
// }
|
||||
//
|
||||
// sess, err := session.NewSession(&aws.Config{})
|
||||
// if err != nil {
|
||||
// panic(fmt.Errorf("failed loading session: %v", err))
|
||||
// }
|
||||
//
|
||||
// r.InjectHandlers(&sess.Handlers)
|
||||
//
|
||||
// client := s3.New(sess)
|
||||
// resp, err := client.GetObject(&s3.GetObjectInput{
|
||||
// Bucket: aws.String("bucket"),
|
||||
// Key: aws.String("key"),
|
||||
// })
|
||||
//
|
||||
// // Will pause monitoring
|
||||
// r.Pause()
|
||||
// resp, err = client.GetObject(&s3.GetObjectInput{
|
||||
// Bucket: aws.String("bucket"),
|
||||
// Key: aws.String("key"),
|
||||
// })
|
||||
//
|
||||
// // Resume monitoring
|
||||
// r.Continue()
|
||||
//
|
||||
// Start returns a Reporter that is used to enable or disable monitoring. If
|
||||
// access to the Reporter is required later, calling Get will return the Reporter
|
||||
// singleton.
|
||||
//
|
||||
// Example:
|
||||
// r := csm.Get()
|
||||
// r.Continue()
|
||||
package csm
|
67 src/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go generated vendored Normal file
@ -0,0 +1,67 @@
|
||||
package csm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
lock sync.Mutex
|
||||
)
|
||||
|
||||
// Client side metric handler names
|
||||
const (
|
||||
APICallMetricHandlerName = "awscsm.SendAPICallMetric"
|
||||
APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric"
|
||||
)
|
||||
|
||||
// Start will start a long-running goroutine to capture
|
||||
// client side metrics. Calling Start multiple times will only
|
||||
// start the metric listener once and will panic if a different
|
||||
// client ID or port is passed in.
|
||||
//
|
||||
// Example:
|
||||
// r, err := csm.Start("clientID", "127.0.0.1:8094")
|
||||
// if err != nil {
|
||||
// panic(fmt.Errorf("expected no error, but received %v", err))
|
||||
// }
|
||||
// sess := session.NewSession()
|
||||
// r.InjectHandlers(sess.Handlers)
|
||||
//
|
||||
// svc := s3.New(sess)
|
||||
// out, err := svc.GetObject(&s3.GetObjectInput{
|
||||
// Bucket: aws.String("bucket"),
|
||||
// Key: aws.String("key"),
|
||||
// })
|
||||
func Start(clientID string, url string) (*Reporter, error) {
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
|
||||
if sender == nil {
|
||||
sender = newReporter(clientID, url)
|
||||
} else {
|
||||
if sender.clientID != clientID {
|
||||
panic(fmt.Errorf("inconsistent client IDs. %q was expected, but received %q", sender.clientID, clientID))
|
||||
}
|
||||
|
||||
if sender.url != url {
|
||||
panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url))
|
||||
}
|
||||
}
|
||||
|
||||
if err := connect(url); err != nil {
|
||||
sender = nil
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return sender, nil
|
||||
}
|
||||
|
||||
// Get will return a reporter if one exists; if one does not exist, nil will
|
||||
// be returned.
|
||||
func Get() *Reporter {
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
|
||||
return sender
|
||||
}
|
109 src/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go generated vendored Normal file
@ -0,0 +1,109 @@
|
||||
package csm
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
)
|
||||
|
||||
type metricTime time.Time
|
||||
|
||||
func (t metricTime) MarshalJSON() ([]byte, error) {
|
||||
ns := time.Duration(time.Time(t).UnixNano())
|
||||
return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil
|
||||
}
|
||||
|
||||
type metric struct {
|
||||
ClientID *string `json:"ClientId,omitempty"`
|
||||
API *string `json:"Api,omitempty"`
|
||||
Service *string `json:"Service,omitempty"`
|
||||
Timestamp *metricTime `json:"Timestamp,omitempty"`
|
||||
Type *string `json:"Type,omitempty"`
|
||||
Version *int `json:"Version,omitempty"`
|
||||
|
||||
AttemptCount *int `json:"AttemptCount,omitempty"`
|
||||
Latency *int `json:"Latency,omitempty"`
|
||||
|
||||
Fqdn *string `json:"Fqdn,omitempty"`
|
||||
UserAgent *string `json:"UserAgent,omitempty"`
|
||||
AttemptLatency *int `json:"AttemptLatency,omitempty"`
|
||||
|
||||
SessionToken *string `json:"SessionToken,omitempty"`
|
||||
Region *string `json:"Region,omitempty"`
|
||||
AccessKey *string `json:"AccessKey,omitempty"`
|
||||
HTTPStatusCode *int `json:"HttpStatusCode,omitempty"`
|
||||
XAmzID2 *string `json:"XAmzId2,omitempty"`
|
||||
XAmzRequestID *string `json:"XAmznRequestId,omitempty"`
|
||||
|
||||
AWSException *string `json:"AwsException,omitempty"`
|
||||
AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"`
|
||||
SDKException *string `json:"SdkException,omitempty"`
|
||||
SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"`
|
||||
|
||||
FinalHTTPStatusCode *int `json:"FinalHttpStatusCode,omitempty"`
|
||||
FinalAWSException *string `json:"FinalAwsException,omitempty"`
|
||||
FinalAWSExceptionMessage *string `json:"FinalAwsExceptionMessage,omitempty"`
|
||||
FinalSDKException *string `json:"FinalSdkException,omitempty"`
|
||||
FinalSDKExceptionMessage *string `json:"FinalSdkExceptionMessage,omitempty"`
|
||||
|
||||
DestinationIP *string `json:"DestinationIp,omitempty"`
|
||||
ConnectionReused *int `json:"ConnectionReused,omitempty"`
|
||||
|
||||
AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"`
|
||||
ConnectLatency *int `json:"ConnectLatency,omitempty"`
|
||||
RequestLatency *int `json:"RequestLatency,omitempty"`
|
||||
DNSLatency *int `json:"DnsLatency,omitempty"`
|
||||
TCPLatency *int `json:"TcpLatency,omitempty"`
|
||||
SSLLatency *int `json:"SslLatency,omitempty"`
|
||||
|
||||
MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"`
|
||||
}
|
||||
|
||||
func (m *metric) TruncateFields() {
|
||||
m.ClientID = truncateString(m.ClientID, 255)
|
||||
m.UserAgent = truncateString(m.UserAgent, 256)
|
||||
|
||||
m.AWSException = truncateString(m.AWSException, 128)
|
||||
m.AWSExceptionMessage = truncateString(m.AWSExceptionMessage, 512)
|
||||
|
||||
m.SDKException = truncateString(m.SDKException, 128)
|
||||
m.SDKExceptionMessage = truncateString(m.SDKExceptionMessage, 512)
|
||||
|
||||
m.FinalAWSException = truncateString(m.FinalAWSException, 128)
|
||||
m.FinalAWSExceptionMessage = truncateString(m.FinalAWSExceptionMessage, 512)
|
||||
|
||||
m.FinalSDKException = truncateString(m.FinalSDKException, 128)
|
||||
m.FinalSDKExceptionMessage = truncateString(m.FinalSDKExceptionMessage, 512)
|
||||
}
|
||||
|
||||
func truncateString(v *string, l int) *string {
|
||||
if v != nil && len(*v) > l {
|
||||
nv := (*v)[:l]
|
||||
return &nv
|
||||
}
|
||||
|
||||
return v
|
||||
}
|
||||
|
||||
func (m *metric) SetException(e metricException) {
|
||||
switch te := e.(type) {
|
||||
case awsException:
|
||||
m.AWSException = aws.String(te.exception)
|
||||
m.AWSExceptionMessage = aws.String(te.message)
|
||||
case sdkException:
|
||||
m.SDKException = aws.String(te.exception)
|
||||
m.SDKExceptionMessage = aws.String(te.message)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *metric) SetFinalException(e metricException) {
|
||||
switch te := e.(type) {
|
||||
case awsException:
|
||||
m.FinalAWSException = aws.String(te.exception)
|
||||
m.FinalAWSExceptionMessage = aws.String(te.message)
|
||||
case sdkException:
|
||||
m.FinalSDKException = aws.String(te.exception)
|
||||
m.FinalSDKExceptionMessage = aws.String(te.message)
|
||||
}
|
||||
}
|
54 src/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go generated vendored Normal file
@ -0,0 +1,54 @@
|
||||
package csm
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
const (
|
||||
runningEnum = iota
|
||||
pausedEnum
|
||||
)
|
||||
|
||||
var (
|
||||
// MetricsChannelSize is the number of metrics the channel can hold
|
||||
MetricsChannelSize = 100
|
||||
)
|
||||
|
||||
type metricChan struct {
|
||||
ch chan metric
|
||||
paused int64
|
||||
}
|
||||
|
||||
func newMetricChan(size int) metricChan {
|
||||
return metricChan{
|
||||
ch: make(chan metric, size),
|
||||
}
|
||||
}
|
||||
|
||||
func (ch *metricChan) Pause() {
|
||||
atomic.StoreInt64(&ch.paused, pausedEnum)
|
||||
}
|
||||
|
||||
func (ch *metricChan) Continue() {
|
||||
atomic.StoreInt64(&ch.paused, runningEnum)
|
||||
}
|
||||
|
||||
func (ch *metricChan) IsPaused() bool {
|
||||
v := atomic.LoadInt64(&ch.paused)
|
||||
return v == pausedEnum
|
||||
}
|
||||
|
||||
// Push will push metrics to the metric channel if the channel
|
||||
// is not paused
|
||||
func (ch *metricChan) Push(m metric) bool {
|
||||
if ch.IsPaused() {
|
||||
return false
|
||||
}
|
||||
|
||||
select {
|
||||
case ch.ch <- m:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
26 src/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go generated vendored Normal file
@ -0,0 +1,26 @@
|
||||
package csm
|
||||
|
||||
type metricException interface {
|
||||
Exception() string
|
||||
Message() string
|
||||
}
|
||||
|
||||
type requestException struct {
|
||||
exception string
|
||||
message string
|
||||
}
|
||||
|
||||
func (e requestException) Exception() string {
|
||||
return e.exception
|
||||
}
|
||||
func (e requestException) Message() string {
|
||||
return e.message
|
||||
}
|
||||
|
||||
type awsException struct {
|
||||
requestException
|
||||
}
|
||||
|
||||
type sdkException struct {
|
||||
requestException
|
||||
}
|
260 src/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go generated vendored Normal file
@ -0,0 +1,260 @@
|
||||
package csm
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultPort is used when no port is specified
|
||||
DefaultPort = "31000"
|
||||
)
|
||||
|
||||
// Reporter will gather metrics of API requests made and
|
||||
// send those metrics to the CSM endpoint.
|
||||
type Reporter struct {
|
||||
clientID string
|
||||
url string
|
||||
conn net.Conn
|
||||
metricsCh metricChan
|
||||
done chan struct{}
|
||||
}
|
||||
|
||||
var (
|
||||
sender *Reporter
|
||||
)
|
||||
|
||||
func connect(url string) error {
|
||||
const network = "udp"
|
||||
if err := sender.connect(network, url); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if sender.done == nil {
|
||||
sender.done = make(chan struct{})
|
||||
go sender.start()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func newReporter(clientID, url string) *Reporter {
|
||||
return &Reporter{
|
||||
clientID: clientID,
|
||||
url: url,
|
||||
metricsCh: newMetricChan(MetricsChannelSize),
|
||||
}
|
||||
}
|
||||
|
||||
func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) {
|
||||
if rep == nil {
|
||||
return
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
creds, _ := r.Config.Credentials.Get()
|
||||
|
||||
m := metric{
|
||||
ClientID: aws.String(rep.clientID),
|
||||
API: aws.String(r.Operation.Name),
|
||||
Service: aws.String(r.ClientInfo.ServiceID),
|
||||
Timestamp: (*metricTime)(&now),
|
||||
UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")),
|
||||
Region: r.Config.Region,
|
||||
Type: aws.String("ApiCallAttempt"),
|
||||
Version: aws.Int(1),
|
||||
|
||||
XAmzRequestID: aws.String(r.RequestID),
|
||||
|
||||
AttemptCount: aws.Int(r.RetryCount + 1),
|
||||
AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))),
|
||||
AccessKey: aws.String(creds.AccessKeyID),
|
||||
}
|
||||
|
||||
if r.HTTPResponse != nil {
|
||||
m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode)
|
||||
}
|
||||
|
||||
if r.Error != nil {
|
||||
if awserr, ok := r.Error.(awserr.Error); ok {
|
||||
m.SetException(getMetricException(awserr))
|
||||
}
|
||||
}
|
||||
|
||||
m.TruncateFields()
|
||||
rep.metricsCh.Push(m)
|
||||
}
|
||||
|
||||
func getMetricException(err awserr.Error) metricException {
|
||||
msg := err.Error()
|
||||
code := err.Code()
|
||||
|
||||
switch code {
|
||||
case "RequestError",
|
||||
request.ErrCodeSerialization,
|
||||
request.CanceledErrorCode:
|
||||
return sdkException{
|
||||
requestException{exception: code, message: msg},
|
||||
}
|
||||
default:
|
||||
return awsException{
|
||||
requestException{exception: code, message: msg},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (rep *Reporter) sendAPICallMetric(r *request.Request) {
|
||||
if rep == nil {
|
||||
return
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
m := metric{
|
||||
ClientID: aws.String(rep.clientID),
|
||||
API: aws.String(r.Operation.Name),
|
||||
Service: aws.String(r.ClientInfo.ServiceID),
|
||||
Timestamp: (*metricTime)(&now),
|
||||
UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")),
|
||||
Type: aws.String("ApiCall"),
|
||||
AttemptCount: aws.Int(r.RetryCount + 1),
|
||||
Region: r.Config.Region,
|
||||
Latency: aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)),
|
||||
XAmzRequestID: aws.String(r.RequestID),
|
||||
MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())),
|
||||
}
|
||||
|
||||
if r.HTTPResponse != nil {
|
||||
m.FinalHTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode)
|
||||
}
|
||||
|
||||
if r.Error != nil {
|
||||
if awserr, ok := r.Error.(awserr.Error); ok {
|
||||
m.SetFinalException(getMetricException(awserr))
|
||||
}
|
||||
}
|
||||
|
||||
m.TruncateFields()
|
||||
|
||||
// TODO: Probably want to figure something out for logging dropped
|
||||
// metrics
|
||||
rep.metricsCh.Push(m)
|
||||
}
|
||||
|
||||
func (rep *Reporter) connect(network, url string) error {
|
||||
if rep.conn != nil {
|
||||
rep.conn.Close()
|
||||
}
|
||||
|
||||
conn, err := net.Dial(network, url)
|
||||
if err != nil {
|
||||
return awserr.New("UDPError", "Could not connect", err)
|
||||
}
|
||||
|
||||
rep.conn = conn
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rep *Reporter) close() {
|
||||
if rep.done != nil {
|
||||
close(rep.done)
|
||||
}
|
||||
|
||||
rep.metricsCh.Pause()
|
||||
}
|
||||
|
||||
func (rep *Reporter) start() {
|
||||
defer func() {
|
||||
rep.metricsCh.Pause()
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-rep.done:
|
||||
rep.done = nil
|
||||
return
|
||||
case m := <-rep.metricsCh.ch:
|
||||
// TODO: What to do with this error? Probably should just log
|
||||
b, err := json.Marshal(m)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
rep.conn.Write(b)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Pause will pause the metric channel preventing any new metrics from
|
||||
// being added.
|
||||
func (rep *Reporter) Pause() {
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
|
||||
if rep == nil {
|
||||
return
|
||||
}
|
||||
|
||||
rep.close()
|
||||
}
|
||||
|
||||
// Continue will reopen the metric channel and allow for monitoring
|
||||
// to be resumed.
|
||||
func (rep *Reporter) Continue() {
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
if rep == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !rep.metricsCh.IsPaused() {
|
||||
return
|
||||
}
|
||||
|
||||
rep.metricsCh.Continue()
|
||||
}
|
||||
|
||||
// InjectHandlers will enable client side metrics and inject the proper
// handlers to handle how metrics are sent.
|
||||
//
|
||||
// Example:
|
||||
// // Start must be called in order to inject the correct handlers
|
||||
// r, err := csm.Start("clientID", "127.0.0.1:8094")
|
||||
// if err != nil {
|
||||
// panic(fmt.Errorf("expected no error, but received %v", err))
|
||||
// }
|
||||
//
|
||||
// sess := session.NewSession()
|
||||
// r.InjectHandlers(&sess.Handlers)
|
||||
//
|
||||
// // create a new service client with our client side metric session
|
||||
// svc := s3.New(sess)
|
||||
func (rep *Reporter) InjectHandlers(handlers *request.Handlers) {
|
||||
if rep == nil {
|
||||
return
|
||||
}
|
||||
|
||||
handlers.Complete.PushFrontNamed(request.NamedHandler{
|
||||
Name: APICallMetricHandlerName,
|
||||
Fn: rep.sendAPICallMetric,
|
||||
})
|
||||
|
||||
handlers.CompleteAttempt.PushFrontNamed(request.NamedHandler{
|
||||
Name: APICallAttemptMetricHandlerName,
|
||||
Fn: rep.sendAPICallAttemptMetric,
|
||||
})
|
||||
}
|
||||
|
||||
// boolIntValue returns 1 for true and 0 for false.
|
||||
func boolIntValue(b bool) int {
|
||||
if b {
|
||||
return 1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
207
src/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
generated
vendored
Normal file
@ -0,0 +1,207 @@
|
||||
// Package defaults is a collection of helpers to retrieve the SDK's default
|
||||
// configuration and handlers.
|
||||
//
|
||||
// Generally this package shouldn't be used directly, but session.Session
|
||||
// instead. This package is useful when you need to reset the defaults
|
||||
// of a session or service client to the SDK defaults before setting
|
||||
// additional parameters.
|
||||
package defaults
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/corehandlers"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
|
||||
"github.com/aws/aws-sdk-go/aws/ec2metadata"
|
||||
"github.com/aws/aws-sdk-go/aws/endpoints"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/internal/shareddefaults"
|
||||
)
|
||||
|
||||
// A Defaults provides a collection of default values for SDK clients.
|
||||
type Defaults struct {
|
||||
Config *aws.Config
|
||||
Handlers request.Handlers
|
||||
}
|
||||
|
||||
// Get returns the SDK's default values with Config and handlers pre-configured.
|
||||
func Get() Defaults {
|
||||
cfg := Config()
|
||||
handlers := Handlers()
|
||||
cfg.Credentials = CredChain(cfg, handlers)
|
||||
|
||||
return Defaults{
|
||||
Config: cfg,
|
||||
Handlers: handlers,
|
||||
}
|
||||
}
|
||||
|
||||
// Config returns the default configuration without credentials.
|
||||
// To retrieve a config with credentials also included use
|
||||
// `defaults.Get().Config` instead.
|
||||
//
|
||||
// Generally you shouldn't need to use this method directly, but
|
||||
// it is available if you need to reset the configuration of an
|
||||
// existing service client or session.
|
||||
func Config() *aws.Config {
|
||||
return aws.NewConfig().
|
||||
WithCredentials(credentials.AnonymousCredentials).
|
||||
WithRegion(os.Getenv("AWS_REGION")).
|
||||
WithHTTPClient(http.DefaultClient).
|
||||
WithMaxRetries(aws.UseServiceDefaultRetries).
|
||||
WithLogger(aws.NewDefaultLogger()).
|
||||
WithLogLevel(aws.LogOff).
|
||||
WithEndpointResolver(endpoints.DefaultResolver())
|
||||
}
|
||||
|
||||
// Handlers returns the default request handlers.
|
||||
//
|
||||
// Generally you shouldn't need to use this method directly, but
|
||||
// it is available if you need to reset the request handlers of an
|
||||
// existing service client or session.
|
||||
func Handlers() request.Handlers {
|
||||
var handlers request.Handlers
|
||||
|
||||
handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
|
||||
handlers.Validate.AfterEachFn = request.HandlerListStopOnError
|
||||
handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
|
||||
handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander)
|
||||
handlers.Build.AfterEachFn = request.HandlerListStopOnError
|
||||
handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
|
||||
handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)
|
||||
handlers.Send.PushBackNamed(corehandlers.SendHandler)
|
||||
handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
|
||||
handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
|
||||
|
||||
return handlers
|
||||
}
|
||||
|
||||
// CredChain returns the default credential chain.
|
||||
//
|
||||
// Generally you shouldn't need to use this method directly, but
|
||||
// it is available if you need to reset the credentials of an
|
||||
// existing service client or session's Config.
|
||||
func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
|
||||
return credentials.NewCredentials(&credentials.ChainProvider{
|
||||
VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
|
||||
Providers: CredProviders(cfg, handlers),
|
||||
})
|
||||
}
|
||||
|
||||
// CredProviders returns the slice of providers used in
|
||||
// the default credential chain.
|
||||
//
|
||||
// This is useful for applications that need to use some other provider (for
// example, different environment variables for legacy reasons) but still want
// to fall back on the default chain of providers. It also allows that default
// chain to be updated automatically.
|
||||
func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider {
|
||||
return []credentials.Provider{
|
||||
&credentials.EnvProvider{},
|
||||
&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
|
||||
RemoteCredProvider(*cfg, handlers),
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
|
||||
httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
|
||||
)
|
||||
|
||||
// RemoteCredProvider returns a credentials provider for the default remote
|
||||
// endpoints such as EC2 or ECS Roles.
|
||||
func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
|
||||
if u := os.Getenv(httpProviderEnvVar); len(u) > 0 {
|
||||
return localHTTPCredProvider(cfg, handlers, u)
|
||||
}
|
||||
|
||||
if uri := os.Getenv(shareddefaults.ECSCredsProviderEnvVar); len(uri) > 0 {
|
||||
u := fmt.Sprintf("%s%s", shareddefaults.ECSContainerCredentialsURI, uri)
|
||||
return httpCredProvider(cfg, handlers, u)
|
||||
}
|
||||
|
||||
return ec2RoleProvider(cfg, handlers)
|
||||
}
|
||||
|
||||
var lookupHostFn = net.LookupHost
|
||||
|
||||
func isLoopbackHost(host string) (bool, error) {
|
||||
ip := net.ParseIP(host)
|
||||
if ip != nil {
|
||||
return ip.IsLoopback(), nil
|
||||
}
|
||||
|
||||
// Host is not an ip, perform lookup
|
||||
addrs, err := lookupHostFn(host)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
for _, addr := range addrs {
|
||||
if !net.ParseIP(addr).IsLoopback() {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
|
||||
var errMsg string
|
||||
|
||||
parsed, err := url.Parse(u)
|
||||
if err != nil {
|
||||
errMsg = fmt.Sprintf("invalid URL, %v", err)
|
||||
} else {
|
||||
host := aws.URLHostname(parsed)
|
||||
if len(host) == 0 {
|
||||
errMsg = "unable to parse host from local HTTP cred provider URL"
|
||||
} else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil {
|
||||
errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr)
|
||||
} else if !isLoopback {
|
||||
errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host)
|
||||
}
|
||||
}
|
||||
|
||||
if len(errMsg) > 0 {
|
||||
if cfg.Logger != nil {
|
||||
cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err)
|
||||
}
|
||||
return credentials.ErrorProvider{
|
||||
Err: awserr.New("CredentialsEndpointError", errMsg, err),
|
||||
ProviderName: endpointcreds.ProviderName,
|
||||
}
|
||||
}
|
||||
|
||||
return httpCredProvider(cfg, handlers, u)
|
||||
}
|
||||
|
||||
func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
|
||||
return endpointcreds.NewProviderClient(cfg, handlers, u,
|
||||
func(p *endpointcreds.Provider) {
|
||||
p.ExpiryWindow = 5 * time.Minute
|
||||
p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar)
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
|
||||
resolver := cfg.EndpointResolver
|
||||
if resolver == nil {
|
||||
resolver = endpoints.DefaultResolver()
|
||||
}
|
||||
|
||||
e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "")
|
||||
return &ec2rolecreds.EC2RoleProvider{
|
||||
Client: ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion),
|
||||
ExpiryWindow: 5 * time.Minute,
|
||||
}
|
||||
}
|
27
src/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
package defaults
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go/internal/shareddefaults"
|
||||
)
|
||||
|
||||
// SharedCredentialsFilename returns the SDK's default file path
|
||||
// for the shared credentials file.
|
||||
//
|
||||
// Builds the shared config file path based on the OS's platform.
|
||||
//
|
||||
// - Linux/Unix: $HOME/.aws/credentials
|
||||
// - Windows: %USERPROFILE%\.aws\credentials
|
||||
func SharedCredentialsFilename() string {
|
||||
return shareddefaults.SharedCredentialsFilename()
|
||||
}
|
||||
|
||||
// SharedConfigFilename returns the SDK's default file path for
|
||||
// the shared config file.
|
||||
//
|
||||
// Builds the shared config file path based on the OS's platform.
|
||||
//
|
||||
// - Linux/Unix: $HOME/.aws/config
|
||||
// - Windows: %USERPROFILE%\.aws\config
|
||||
func SharedConfigFilename() string {
|
||||
return shareddefaults.SharedConfigFilename()
|
||||
}
|
56
src/vendor/github.com/aws/aws-sdk-go/aws/doc.go
generated
vendored
Normal file
@ -0,0 +1,56 @@
|
||||
// Package aws provides the core SDK's utilities and shared types. Use this package's
|
||||
// utilities to simplify setting and reading API operations parameters.
|
||||
//
|
||||
// Value and Pointer Conversion Utilities
|
||||
//
|
||||
// This package includes a helper conversion utility for each scalar type the SDK's
|
||||
// API use. These utilities make getting a pointer of the scalar, and dereferencing
|
||||
// a pointer easier.
|
||||
//
|
||||
// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value.
|
||||
// The Pointer to value will safely dereference the pointer and return its value.
|
||||
// If the pointer was nil, the scalar's zero value will be returned.
|
||||
//
|
||||
// The value to pointer functions will be named after the scalar type. So to get
// a *string from a string value, use the "String" function. This makes it easy
// to get a pointer to a literal string value, because getting the address of a
// literal requires assigning the value to a variable first.
|
||||
//
|
||||
// var strPtr *string
|
||||
//
|
||||
// // Without the SDK's conversion functions
|
||||
// str := "my string"
|
||||
// strPtr = &str
|
||||
//
|
||||
// // With the SDK's conversion functions
|
||||
// strPtr = aws.String("my string")
|
||||
//
|
||||
// // Convert *string to string value
|
||||
// str = aws.StringValue(strPtr)
|
||||
//
|
||||
// In addition to scalars, the aws package also includes conversion utilities for
// map and slice types commonly used in API parameters. The map and slice
// conversion functions use a similar naming pattern to the scalar conversion
// functions.
|
||||
//
|
||||
// var strPtrs []*string
|
||||
// var strs []string = []string{"Go", "Gophers", "Go"}
|
||||
//
|
||||
// // Convert []string to []*string
|
||||
// strPtrs = aws.StringSlice(strs)
|
||||
//
|
||||
// // Convert []*string to []string
|
||||
// strs = aws.StringValueSlice(strPtrs)
|
||||
//
|
||||
// SDK Default HTTP Client
|
||||
//
|
||||
// The SDK will use the http.DefaultClient if an HTTP client is not provided to
|
||||
// the SDK's Session, or service client constructor. This means that if the
|
||||
// http.DefaultClient is modified by other components of your application the
|
||||
// modifications will be picked up by the SDK as well.
|
||||
//
|
||||
// In some cases this might be intended, but it is a better practice to create
|
||||
// a custom HTTP Client to share explicitly through your application. You can
|
||||
// configure the SDK to use the custom HTTP Client by setting the HTTPClient
|
||||
// value of the SDK's Config type when creating a Session or service client.
|
||||
package aws
|
169
src/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
generated
vendored
Normal file
@ -0,0 +1,169 @@
|
||||
package ec2metadata
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/internal/sdkuri"
|
||||
)
|
||||
|
||||
// GetMetadata uses the path provided to request information from the EC2
|
||||
// instance metadata service. The content will be returned as a string, or an
// error if the request failed.
|
||||
func (c *EC2Metadata) GetMetadata(p string) (string, error) {
|
||||
op := &request.Operation{
|
||||
Name: "GetMetadata",
|
||||
HTTPMethod: "GET",
|
||||
HTTPPath: sdkuri.PathJoin("/meta-data", p),
|
||||
}
|
||||
|
||||
output := &metadataOutput{}
|
||||
req := c.NewRequest(op, nil, output)
|
||||
err := req.Send()
|
||||
|
||||
return output.Content, err
|
||||
}
|
||||
|
||||
// GetUserData returns the userdata that was configured for the service. If
|
||||
// there is no user-data set up for the EC2 instance, a "NotFoundError" error
|
||||
// code will be returned.
|
||||
func (c *EC2Metadata) GetUserData() (string, error) {
|
||||
op := &request.Operation{
|
||||
Name: "GetUserData",
|
||||
HTTPMethod: "GET",
|
||||
HTTPPath: "/user-data",
|
||||
}
|
||||
|
||||
output := &metadataOutput{}
|
||||
req := c.NewRequest(op, nil, output)
|
||||
req.Handlers.UnmarshalError.PushBack(func(r *request.Request) {
|
||||
if r.HTTPResponse.StatusCode == http.StatusNotFound {
|
||||
r.Error = awserr.New("NotFoundError", "user-data not found", r.Error)
|
||||
}
|
||||
})
|
||||
err := req.Send()
|
||||
|
||||
return output.Content, err
|
||||
}
|
||||
|
||||
// GetDynamicData uses the path provided to request information from the EC2
|
||||
// instance metadata service for dynamic data. The content will be returned
|
||||
// as a string, or error if the request failed.
|
||||
func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
|
||||
op := &request.Operation{
|
||||
Name: "GetDynamicData",
|
||||
HTTPMethod: "GET",
|
||||
HTTPPath: sdkuri.PathJoin("/dynamic", p),
|
||||
}
|
||||
|
||||
output := &metadataOutput{}
|
||||
req := c.NewRequest(op, nil, output)
|
||||
err := req.Send()
|
||||
|
||||
return output.Content, err
|
||||
}
|
||||
|
||||
// GetInstanceIdentityDocument retrieves an identity document describing an
|
||||
// instance. Error is returned if the request fails or is unable to parse
|
||||
// the response.
|
||||
func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) {
|
||||
resp, err := c.GetDynamicData("instance-identity/document")
|
||||
if err != nil {
|
||||
return EC2InstanceIdentityDocument{},
|
||||
awserr.New("EC2MetadataRequestError",
|
||||
"failed to get EC2 instance identity document", err)
|
||||
}
|
||||
|
||||
doc := EC2InstanceIdentityDocument{}
|
||||
if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil {
|
||||
return EC2InstanceIdentityDocument{},
|
||||
awserr.New(request.ErrCodeSerialization,
|
||||
"failed to decode EC2 instance identity document", err)
|
||||
}
|
||||
|
||||
return doc, nil
|
||||
}
|
||||
|
||||
// IAMInfo retrieves IAM info from the metadata API
|
||||
func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {
|
||||
resp, err := c.GetMetadata("iam/info")
|
||||
if err != nil {
|
||||
return EC2IAMInfo{},
|
||||
awserr.New("EC2MetadataRequestError",
|
||||
"failed to get EC2 IAM info", err)
|
||||
}
|
||||
|
||||
info := EC2IAMInfo{}
|
||||
if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil {
|
||||
return EC2IAMInfo{},
|
||||
awserr.New(request.ErrCodeSerialization,
|
||||
"failed to decode EC2 IAM info", err)
|
||||
}
|
||||
|
||||
if info.Code != "Success" {
|
||||
errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code)
|
||||
return EC2IAMInfo{},
|
||||
awserr.New("EC2MetadataError", errMsg, nil)
|
||||
}
|
||||
|
||||
return info, nil
|
||||
}
|
||||
|
||||
// Region returns the region the instance is running in.
|
||||
func (c *EC2Metadata) Region() (string, error) {
|
||||
resp, err := c.GetMetadata("placement/availability-zone")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if len(resp) == 0 {
|
||||
return "", awserr.New("EC2MetadataError", "invalid Region response", nil)
|
||||
}
|
||||
|
||||
// returns region without the suffix. Eg: us-west-2a becomes us-west-2
|
||||
return resp[:len(resp)-1], nil
|
||||
}
|
||||
|
||||
// Available returns if the application has access to the EC2 Metadata service.
|
||||
// Can be used to determine if application is running within an EC2 Instance and
|
||||
// the metadata service is available.
|
||||
func (c *EC2Metadata) Available() bool {
|
||||
if _, err := c.GetMetadata("instance-id"); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// An EC2IAMInfo provides the shape for unmarshaling
|
||||
// an IAM info from the metadata API
|
||||
type EC2IAMInfo struct {
|
||||
Code string
|
||||
LastUpdated time.Time
|
||||
InstanceProfileArn string
|
||||
InstanceProfileID string
|
||||
}
|
||||
|
||||
// An EC2InstanceIdentityDocument provides the shape for unmarshaling
|
||||
// an instance identity document
|
||||
type EC2InstanceIdentityDocument struct {
|
||||
DevpayProductCodes []string `json:"devpayProductCodes"`
|
||||
AvailabilityZone string `json:"availabilityZone"`
|
||||
PrivateIP string `json:"privateIp"`
|
||||
Version string `json:"version"`
|
||||
Region string `json:"region"`
|
||||
InstanceID string `json:"instanceId"`
|
||||
BillingProducts []string `json:"billingProducts"`
|
||||
InstanceType string `json:"instanceType"`
|
||||
AccountID string `json:"accountId"`
|
||||
PendingTime time.Time `json:"pendingTime"`
|
||||
ImageID string `json:"imageId"`
|
||||
KernelID string `json:"kernelId"`
|
||||
RamdiskID string `json:"ramdiskId"`
|
||||
Architecture string `json:"architecture"`
|
||||
}
|
152
src/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
generated
vendored
Normal file
@ -0,0 +1,152 @@
|
||||
// Package ec2metadata provides the client for making API calls to the
|
||||
// EC2 Metadata service.
|
||||
//
|
||||
// This package's client can be disabled completely by setting the environment
|
||||
// variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to
|
||||
// true instructs the SDK to disable the EC2 Metadata client. The client cannot
|
||||
// be used while the environment variable is set to true, (case insensitive).
|
||||
package ec2metadata
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/client"
|
||||
"github.com/aws/aws-sdk-go/aws/client/metadata"
|
||||
"github.com/aws/aws-sdk-go/aws/corehandlers"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
)
|
||||
|
||||
// ServiceName is the name of the service.
|
||||
const ServiceName = "ec2metadata"
|
||||
const disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED"
|
||||
|
||||
// An EC2Metadata is an EC2 Metadata service Client.
|
||||
type EC2Metadata struct {
|
||||
*client.Client
|
||||
}
|
||||
|
||||
// New creates a new instance of the EC2Metadata client with a session.
|
||||
// This client is safe to use across multiple goroutines.
|
||||
//
|
||||
//
|
||||
// Example:
|
||||
// // Create a EC2Metadata client from just a session.
|
||||
// svc := ec2metadata.New(mySession)
|
||||
//
|
||||
// // Create a EC2Metadata client with additional configuration
|
||||
// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
|
||||
func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
|
||||
c := p.ClientConfig(ServiceName, cfgs...)
|
||||
return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
|
||||
}
|
||||
|
||||
// NewClient returns a new EC2Metadata client. Should be used to create
|
||||
// a client when not using a session. Generally using just New with a session
|
||||
// is preferred.
|
||||
//
|
||||
// If an unmodified HTTP client is provided from the stdlib default, or no
// client at all, the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened.
|
||||
// To disable this set Config.EC2MetadataDisableTimeoutOverride to false. Enabled by default.
|
||||
func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
|
||||
if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {
|
||||
// If the http client is unmodified and this feature is not disabled
|
||||
// set custom timeouts for EC2Metadata requests.
|
||||
cfg.HTTPClient = &http.Client{
|
||||
// use a shorter timeout than default because the metadata
|
||||
// service is local if it is running, and to fail faster
|
||||
// if not running on an ec2 instance.
|
||||
Timeout: 5 * time.Second,
|
||||
}
|
||||
}
|
||||
|
||||
svc := &EC2Metadata{
|
||||
Client: client.New(
|
||||
cfg,
|
||||
metadata.ClientInfo{
|
||||
ServiceName: ServiceName,
|
||||
ServiceID: ServiceName,
|
||||
Endpoint: endpoint,
|
||||
APIVersion: "latest",
|
||||
},
|
||||
handlers,
|
||||
),
|
||||
}
|
||||
|
||||
svc.Handlers.Unmarshal.PushBack(unmarshalHandler)
|
||||
svc.Handlers.UnmarshalError.PushBack(unmarshalError)
|
||||
svc.Handlers.Validate.Clear()
|
||||
svc.Handlers.Validate.PushBack(validateEndpointHandler)
|
||||
|
||||
// Disable the EC2 Metadata service if the environment variable is set.
|
||||
// This short-circuits the service's functionality to always fail to send
|
||||
// requests.
|
||||
if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" {
|
||||
svc.Handlers.Send.SwapNamed(request.NamedHandler{
|
||||
Name: corehandlers.SendHandler.Name,
|
||||
Fn: func(r *request.Request) {
|
||||
r.HTTPResponse = &http.Response{
|
||||
Header: http.Header{},
|
||||
}
|
||||
r.Error = awserr.New(
|
||||
request.CanceledErrorCode,
|
||||
"EC2 IMDS access disabled via "+disableServiceEnvVar+" env var",
|
||||
nil)
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// Add additional options to the service config
|
||||
for _, option := range opts {
|
||||
option(svc.Client)
|
||||
}
|
||||
|
||||
return svc
|
||||
}
|
||||
|
||||
func httpClientZero(c *http.Client) bool {
|
||||
return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0)
|
||||
}
|
||||
|
||||
type metadataOutput struct {
|
||||
Content string
|
||||
}
|
||||
|
||||
func unmarshalHandler(r *request.Request) {
|
||||
defer r.HTTPResponse.Body.Close()
|
||||
b := &bytes.Buffer{}
|
||||
if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
|
||||
r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata respose", err)
|
||||
return
|
||||
}
|
||||
|
||||
if data, ok := r.Data.(*metadataOutput); ok {
|
||||
data.Content = b.String()
|
||||
}
|
||||
}
|
||||
|
||||
func unmarshalError(r *request.Request) {
|
||||
defer r.HTTPResponse.Body.Close()
|
||||
b := &bytes.Buffer{}
|
||||
if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
|
||||
r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error respose", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Response body format is not consistent between metadata endpoints.
|
||||
// Grab the error message as a string and include that as the source error
|
||||
r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String()))
|
||||
}
|
||||
|
||||
func validateEndpointHandler(r *request.Request) {
|
||||
if r.ClientInfo.Endpoint == "" {
|
||||
r.Error = aws.ErrMissingEndpoint
|
||||
}
|
||||
}
|
188
src/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
generated
vendored
Normal file
@ -0,0 +1,188 @@
|
||||
package endpoints
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
)
|
||||
|
||||
type modelDefinition map[string]json.RawMessage
|
||||
|
||||
// A DecodeModelOptions provides the options for how the endpoints model
// definition is decoded.
|
||||
type DecodeModelOptions struct {
|
||||
SkipCustomizations bool
|
||||
}
|
||||
|
||||
// Set combines all of the option functions together.
|
||||
func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) {
|
||||
for _, fn := range optFns {
|
||||
fn(d)
|
||||
}
|
||||
}
|
||||
|
||||
// DecodeModel unmarshals a Regions and Endpoint model definition file into
|
||||
// an endpoint Resolver. If the file format is not supported, or an error occurs
// when unmarshaling the model, an error will be returned.
//
// Casting the return value of this func to an EnumPartitions will
|
||||
// allow you to get a list of the partitions in the order the endpoints
|
||||
// will be resolved in.
|
||||
//
|
||||
// resolver, err := endpoints.DecodeModel(reader)
|
||||
//
|
||||
// partitions := resolver.(endpoints.EnumPartitions).Partitions()
|
||||
// for _, p := range partitions {
|
||||
// // ... inspect partitions
|
||||
// }
|
||||
func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) {
|
||||
var opts DecodeModelOptions
|
||||
opts.Set(optFns...)
|
||||
|
||||
// Get the version of the partition file to determine what
|
||||
// unmarshaling model to use.
|
||||
modelDef := modelDefinition{}
|
||||
if err := json.NewDecoder(r).Decode(&modelDef); err != nil {
|
||||
return nil, newDecodeModelError("failed to decode endpoints model", err)
|
||||
}
|
||||
|
||||
var version string
|
||||
if b, ok := modelDef["version"]; ok {
|
||||
version = string(b)
|
||||
} else {
|
||||
return nil, newDecodeModelError("endpoints version not found in model", nil)
|
||||
}
|
||||
|
||||
if version == "3" {
|
||||
return decodeV3Endpoints(modelDef, opts)
|
||||
}
|
||||
|
||||
return nil, newDecodeModelError(
|
||||
fmt.Sprintf("endpoints version %s, not supported", version), nil)
|
||||
}
|
||||
|
||||
func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) {
|
||||
b, ok := modelDef["partitions"]
|
||||
if !ok {
|
||||
return nil, newDecodeModelError("endpoints model missing partitions", nil)
|
||||
}
|
||||
|
||||
ps := partitions{}
|
||||
if err := json.Unmarshal(b, &ps); err != nil {
|
||||
return nil, newDecodeModelError("failed to decode endpoints model", err)
|
||||
}
|
||||
|
||||
if opts.SkipCustomizations {
|
||||
return ps, nil
|
||||
}
|
||||
|
||||
// Customization
|
||||
for i := 0; i < len(ps); i++ {
|
||||
p := &ps[i]
|
||||
custAddEC2Metadata(p)
|
||||
custAddS3DualStack(p)
|
||||
custRmIotDataService(p)
|
||||
custFixAppAutoscalingChina(p)
|
||||
custFixAppAutoscalingUsGov(p)
|
||||
}
|
||||
|
||||
return ps, nil
|
||||
}
|
||||
|
||||
func custAddS3DualStack(p *partition) {
|
||||
if p.ID != "aws" {
|
||||
return
|
||||
}
|
||||
|
||||
custAddDualstack(p, "s3")
|
||||
custAddDualstack(p, "s3-control")
|
||||
}
|
||||
|
||||
func custAddDualstack(p *partition, svcName string) {
|
||||
s, ok := p.Services[svcName]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
s.Defaults.HasDualStack = boxedTrue
|
||||
s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}"
|
||||
|
||||
p.Services[svcName] = s
|
||||
}
|
||||
|
||||
func custAddEC2Metadata(p *partition) {
|
||||
p.Services["ec2metadata"] = service{
|
||||
IsRegionalized: boxedFalse,
|
||||
PartitionEndpoint: "aws-global",
|
||||
Endpoints: endpoints{
|
||||
"aws-global": endpoint{
|
||||
Hostname: "169.254.169.254/latest",
|
||||
Protocols: []string{"http"},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func custRmIotDataService(p *partition) {
|
||||
delete(p.Services, "data.iot")
|
||||
}
|
||||
|
||||
func custFixAppAutoscalingChina(p *partition) {
|
||||
if p.ID != "aws-cn" {
|
||||
return
|
||||
}
|
||||
|
||||
const serviceName = "application-autoscaling"
|
||||
s, ok := p.Services[serviceName]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
const expectHostname = `autoscaling.{region}.amazonaws.com`
|
||||
if e, a := s.Defaults.Hostname, expectHostname; e != a {
|
||||
fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a)
|
||||
return
|
||||
}
|
||||
|
||||
s.Defaults.Hostname = expectHostname + ".cn"
|
||||
p.Services[serviceName] = s
|
||||
}
|
||||
|
||||
func custFixAppAutoscalingUsGov(p *partition) {
|
||||
if p.ID != "aws-us-gov" {
|
||||
return
|
||||
}
|
||||
|
||||
const serviceName = "application-autoscaling"
|
||||
s, ok := p.Services[serviceName]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
if a := s.Defaults.CredentialScope.Service; a != "" {
|
||||
fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a)
|
||||
return
|
||||
}
|
||||
|
||||
if a := s.Defaults.Hostname; a != "" {
|
||||
fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty hostname, got %s\n", a)
|
||||
return
|
||||
}
|
||||
|
||||
s.Defaults.CredentialScope.Service = "application-autoscaling"
|
||||
s.Defaults.Hostname = "autoscaling.{region}.amazonaws.com"
|
||||
|
||||
p.Services[serviceName] = s
|
||||
}
|
||||
|
||||
type decodeModelError struct {
|
||||
awsError
|
||||
}
|
||||
|
||||
func newDecodeModelError(msg string, err error) decodeModelError {
|
||||
return decodeModelError{
|
||||
awsError: awserr.New("DecodeEndpointsModelError", msg, err),
|
||||
}
|
||||
}
|
4406
src/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
generated
vendored
Normal file
File diff suppressed because it is too large
141
src/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go
generated
vendored
Normal file
@ -0,0 +1,141 @@
|
||||
package endpoints
|
||||
|
||||
// Service identifiers
|
||||
//
|
||||
// Deprecated: Use client package's EndpointsID value instead of these
|
||||
// ServiceIDs. These IDs are not maintained, and are out of date.
|
||||
const (
|
||||
A4bServiceID = "a4b" // A4b.
|
||||
AcmServiceID = "acm" // Acm.
|
||||
AcmPcaServiceID = "acm-pca" // AcmPca.
|
||||
ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor.
|
||||
ApiPricingServiceID = "api.pricing" // ApiPricing.
|
||||
ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker.
|
||||
ApigatewayServiceID = "apigateway" // Apigateway.
|
||||
ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling.
|
||||
Appstream2ServiceID = "appstream2" // Appstream2.
|
||||
AppsyncServiceID = "appsync" // Appsync.
|
||||
AthenaServiceID = "athena" // Athena.
|
||||
AutoscalingServiceID = "autoscaling" // Autoscaling.
|
||||
AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans.
|
||||
BatchServiceID = "batch" // Batch.
|
||||
BudgetsServiceID = "budgets" // Budgets.
|
||||
CeServiceID = "ce" // Ce.
|
||||
ChimeServiceID = "chime" // Chime.
|
||||
Cloud9ServiceID = "cloud9" // Cloud9.
|
||||
ClouddirectoryServiceID = "clouddirectory" // Clouddirectory.
|
||||
CloudformationServiceID = "cloudformation" // Cloudformation.
|
||||
CloudfrontServiceID = "cloudfront" // Cloudfront.
|
||||
CloudhsmServiceID = "cloudhsm" // Cloudhsm.
|
||||
Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2.
|
||||
CloudsearchServiceID = "cloudsearch" // Cloudsearch.
|
||||
CloudtrailServiceID = "cloudtrail" // Cloudtrail.
|
||||
CodebuildServiceID = "codebuild" // Codebuild.
|
||||
CodecommitServiceID = "codecommit" // Codecommit.
|
||||
CodedeployServiceID = "codedeploy" // Codedeploy.
|
||||
CodepipelineServiceID = "codepipeline" // Codepipeline.
|
||||
CodestarServiceID = "codestar" // Codestar.
|
||||
CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity.
|
||||
CognitoIdpServiceID = "cognito-idp" // CognitoIdp.
|
||||
CognitoSyncServiceID = "cognito-sync" // CognitoSync.
|
||||
ComprehendServiceID = "comprehend" // Comprehend.
|
||||
ConfigServiceID = "config" // Config.
|
||||
CurServiceID = "cur" // Cur.
|
||||
DatapipelineServiceID = "datapipeline" // Datapipeline.
|
||||
DaxServiceID = "dax" // Dax.
|
||||
DevicefarmServiceID = "devicefarm" // Devicefarm.
|
||||
DirectconnectServiceID = "directconnect" // Directconnect.
|
||||
DiscoveryServiceID = "discovery" // Discovery.
|
||||
DmsServiceID = "dms" // Dms.
|
||||
DsServiceID = "ds" // Ds.
|
||||
DynamodbServiceID = "dynamodb" // Dynamodb.
|
||||
Ec2ServiceID = "ec2" // Ec2.
|
||||
Ec2metadataServiceID = "ec2metadata" // Ec2metadata.
|
||||
EcrServiceID = "ecr" // Ecr.
|
||||
EcsServiceID = "ecs" // Ecs.
|
||||
ElasticacheServiceID = "elasticache" // Elasticache.
|
||||
ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk.
|
||||
ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem.
|
||||
ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing.
|
||||
ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce.
|
||||
ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder.
|
||||
EmailServiceID = "email" // Email.
|
||||
EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace.
|
||||
EsServiceID = "es" // Es.
|
||||
EventsServiceID = "events" // Events.
|
||||
FirehoseServiceID = "firehose" // Firehose.
|
||||
FmsServiceID = "fms" // Fms.
|
||||
GameliftServiceID = "gamelift" // Gamelift.
|
||||
GlacierServiceID = "glacier" // Glacier.
|
||||
GlueServiceID = "glue" // Glue.
|
||||
GreengrassServiceID = "greengrass" // Greengrass.
|
||||
GuarddutyServiceID = "guardduty" // Guardduty.
|
||||
HealthServiceID = "health" // Health.
|
||||
IamServiceID = "iam" // Iam.
|
||||
ImportexportServiceID = "importexport" // Importexport.
|
||||
InspectorServiceID = "inspector" // Inspector.
|
||||
IotServiceID = "iot" // Iot.
|
||||
IotanalyticsServiceID = "iotanalytics" // Iotanalytics.
|
||||
KinesisServiceID = "kinesis" // Kinesis.
|
||||
KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics.
|
||||
KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo.
|
||||
KmsServiceID = "kms" // Kms.
|
||||
LambdaServiceID = "lambda" // Lambda.
|
||||
LightsailServiceID = "lightsail" // Lightsail.
|
||||
LogsServiceID = "logs" // Logs.
|
||||
MachinelearningServiceID = "machinelearning" // Machinelearning.
|
||||
MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics.
|
||||
MediaconvertServiceID = "mediaconvert" // Mediaconvert.
|
||||
MedialiveServiceID = "medialive" // Medialive.
|
||||
MediapackageServiceID = "mediapackage" // Mediapackage.
|
||||
MediastoreServiceID = "mediastore" // Mediastore.
|
||||
MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace.
|
||||
MghServiceID = "mgh" // Mgh.
|
||||
MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics.
|
||||
ModelsLexServiceID = "models.lex" // ModelsLex.
|
||||
MonitoringServiceID = "monitoring" // Monitoring.
|
||||
MturkRequesterServiceID = "mturk-requester" // MturkRequester.
|
||||
NeptuneServiceID = "neptune" // Neptune.
|
||||
OpsworksServiceID = "opsworks" // Opsworks.
|
||||
OpsworksCmServiceID = "opsworks-cm" // OpsworksCm.
|
||||
OrganizationsServiceID = "organizations" // Organizations.
|
||||
PinpointServiceID = "pinpoint" // Pinpoint.
|
||||
PollyServiceID = "polly" // Polly.
|
||||
RdsServiceID = "rds" // Rds.
|
||||
RedshiftServiceID = "redshift" // Redshift.
|
||||
RekognitionServiceID = "rekognition" // Rekognition.
|
||||
ResourceGroupsServiceID = "resource-groups" // ResourceGroups.
|
||||
Route53ServiceID = "route53" // Route53.
|
||||
Route53domainsServiceID = "route53domains" // Route53domains.
|
||||
RuntimeLexServiceID = "runtime.lex" // RuntimeLex.
|
||||
RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker.
|
||||
S3ServiceID = "s3" // S3.
|
||||
S3ControlServiceID = "s3-control" // S3Control.
|
||||
SagemakerServiceID = "api.sagemaker" // Sagemaker.
|
||||
SdbServiceID = "sdb" // Sdb.
|
||||
SecretsmanagerServiceID = "secretsmanager" // Secretsmanager.
|
||||
ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo.
|
||||
ServicecatalogServiceID = "servicecatalog" // Servicecatalog.
|
||||
ServicediscoveryServiceID = "servicediscovery" // Servicediscovery.
|
||||
ShieldServiceID = "shield" // Shield.
|
||||
SmsServiceID = "sms" // Sms.
|
||||
SnowballServiceID = "snowball" // Snowball.
|
||||
SnsServiceID = "sns" // Sns.
|
||||
SqsServiceID = "sqs" // Sqs.
|
||||
SsmServiceID = "ssm" // Ssm.
|
||||
StatesServiceID = "states" // States.
|
||||
StoragegatewayServiceID = "storagegateway" // Storagegateway.
|
||||
StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb.
|
||||
StsServiceID = "sts" // Sts.
|
||||
SupportServiceID = "support" // Support.
|
||||
SwfServiceID = "swf" // Swf.
|
||||
TaggingServiceID = "tagging" // Tagging.
|
||||
TransferServiceID = "transfer" // Transfer.
|
||||
TranslateServiceID = "translate" // Translate.
|
||||
WafServiceID = "waf" // Waf.
|
||||
WafRegionalServiceID = "waf-regional" // WafRegional.
|
||||
WorkdocsServiceID = "workdocs" // Workdocs.
|
||||
WorkmailServiceID = "workmail" // Workmail.
|
||||
WorkspacesServiceID = "workspaces" // Workspaces.
|
||||
XrayServiceID = "xray" // Xray.
|
||||
)
|
66
src/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
generated
vendored
Normal file
@ -0,0 +1,66 @@
|
||||
// Package endpoints provides the types and functionality for defining regions
|
||||
// and endpoints, as well as querying those definitions.
|
||||
//
|
||||
// The SDK's Regions and Endpoints metadata is code generated into the endpoints
// package, and is accessible via the DefaultResolver function. This function
// returns an endpoint Resolver that will search the metadata and build an
// associated endpoint if one is found. The default resolver will search all
// partitions known by the SDK, e.g. AWS Standard (aws), AWS China (aws-cn), and
// AWS GovCloud (US) (aws-us-gov).
|
||||
//
|
||||
// Enumerating Regions and Endpoint Metadata
|
||||
//
|
||||
// Casting the Resolver returned by DefaultResolver to an EnumPartitions interface
|
||||
// will allow you to get access to the list of underlying Partitions with the
|
||||
// Partitions method. This is helpful if you want to limit the SDK's endpoint
|
||||
// resolving to a single partition, or enumerate regions, services, and endpoints
|
||||
// in the partition.
|
||||
//
|
||||
// resolver := endpoints.DefaultResolver()
|
||||
// partitions := resolver.(endpoints.EnumPartitions).Partitions()
|
||||
//
|
||||
// for _, p := range partitions {
|
||||
// fmt.Println("Regions for", p.ID())
|
||||
// for id, _ := range p.Regions() {
|
||||
// fmt.Println("*", id)
|
||||
// }
|
||||
//
|
||||
// fmt.Println("Services for", p.ID())
|
||||
// for id, _ := range p.Services() {
|
||||
// fmt.Println("*", id)
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// Using Custom Endpoints
|
||||
//
|
||||
// The endpoints package also gives you the ability to use your own logic for how
// endpoints are resolved. This is a great way to define a custom endpoint
|
||||
// for select services, without passing that logic down through your code.
|
||||
//
|
||||
// If a type implements the Resolver interface it can be used to resolve
|
||||
// endpoints. To use this with the SDK's Session and Config set the value
|
||||
// of the type to the EndpointsResolver field of aws.Config when initializing
|
||||
// the session, or service client.
|
||||
//
|
||||
// In addition the ResolverFunc is a wrapper for a func matching the signature
|
||||
// of Resolver.EndpointFor, converting it to a type that satisfies the
|
||||
// Resolver interface.
|
||||
//
|
||||
//
|
||||
// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
|
||||
// if service == endpoints.S3ServiceID {
|
||||
// return endpoints.ResolvedEndpoint{
|
||||
// URL: "s3.custom.endpoint.com",
|
||||
// SigningRegion: "custom-signing-region",
|
||||
// }, nil
|
||||
// }
|
||||
//
|
||||
// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
|
||||
// }
|
||||
//
|
||||
// sess := session.Must(session.NewSession(&aws.Config{
|
||||
// Region: aws.String("us-west-2"),
|
||||
// EndpointResolver: endpoints.ResolverFunc(myCustomResolver),
|
||||
// }))
|
||||
package endpoints
|
449
src/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
generated
vendored
Normal file
@ -0,0 +1,449 @@
|
||||
package endpoints
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
)
|
||||
|
||||
// Options provide the configuration needed to direct how the
|
||||
// endpoints will be resolved.
|
||||
type Options struct {
|
||||
// DisableSSL forces the endpoint to be resolved as HTTP.
|
||||
// instead of HTTPS if the service supports it.
|
||||
DisableSSL bool
|
||||
|
||||
// Sets the resolver to resolve the endpoint as a dualstack endpoint
|
||||
// for the service. If dualstack support for a service is not known and
|
||||
// StrictMatching is not enabled a dualstack endpoint for the service will
|
||||
// be returned. This endpoint may not be valid. If StrictMatching is
|
||||
// enabled only services that are known to support dualstack will return
|
||||
// dualstack endpoints.
|
||||
UseDualStack bool
|
||||
|
||||
// Enables strict matching of services and regions resolved endpoints.
|
||||
// If the partition doesn't enumerate the exact service and region an
|
||||
// error will be returned. This option will prevent returning endpoints
|
||||
// that look valid, but may not resolve to any real endpoint.
|
||||
StrictMatching bool
|
||||
|
||||
// Enables resolving a service endpoint based on the region provided if the
|
||||
// service does not exist. The service endpoint ID will be used as the service
|
||||
// domain name prefix. By default the endpoint resolver requires the service
|
||||
// to be known when resolving endpoints.
|
||||
//
|
||||
// If resolving an endpoint on the partition list the provided region will
|
||||
// be used to determine which partition's domain name pattern to the service
|
||||
// endpoint ID with. If both the service and region are unknown and resolving
|
||||
// the endpoint on partition list an UnknownEndpointError error will be returned.
|
||||
//
|
||||
// If resolving and endpoint on a partition specific resolver that partition's
|
||||
// domain name pattern will be used with the service endpoint ID. If both
|
||||
// region and service do not exist when resolving an endpoint on a specific
|
||||
// partition the partition's domain pattern will be used to combine the
|
||||
// endpoint and region together.
|
||||
//
|
||||
// This option is ignored if StrictMatching is enabled.
|
||||
ResolveUnknownService bool
|
||||
}
|
||||
|
||||
// Set combines all of the option functions together.
|
||||
func (o *Options) Set(optFns ...func(*Options)) {
|
||||
for _, fn := range optFns {
|
||||
fn(o)
|
||||
}
|
||||
}
|
||||
|
||||
// DisableSSLOption sets the DisableSSL options. Can be used as a functional
|
||||
// option when resolving endpoints.
|
||||
func DisableSSLOption(o *Options) {
|
||||
o.DisableSSL = true
|
||||
}
|
||||
|
||||
// UseDualStackOption sets the UseDualStack option. Can be used as a functional
|
||||
// option when resolving endpoints.
|
||||
func UseDualStackOption(o *Options) {
|
||||
o.UseDualStack = true
|
||||
}
|
||||
|
||||
// StrictMatchingOption sets the StrictMatching option. Can be used as a functional
|
||||
// option when resolving endpoints.
|
||||
func StrictMatchingOption(o *Options) {
|
||||
o.StrictMatching = true
|
||||
}
|
||||
|
||||
// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used
|
||||
// as a functional option when resolving endpoints.
|
||||
func ResolveUnknownServiceOption(o *Options) {
|
||||
o.ResolveUnknownService = true
|
||||
}
|
||||
|
||||
// A Resolver provides the interface for functionality to resolve endpoints.
|
||||
// The built-in Partition and DefaultResolver return values satisfy this interface.
|
||||
type Resolver interface {
|
||||
EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
|
||||
}
|
||||
|
||||
// ResolverFunc is a helper utility that wraps a function so it satisfies the
|
||||
// Resolver interface. This is useful when you want to add additional endpoint
|
||||
// resolving logic, or stub out specific endpoints with custom values.
|
||||
type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
|
||||
|
||||
// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface.
|
||||
func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
|
||||
return fn(service, region, opts...)
|
||||
}
|
||||
|
||||
var schemeRE = regexp.MustCompile("^([^:]+)://")
|
||||
|
||||
// AddScheme adds the HTTP or HTTPS schemes to an endpoint URL if there is no
// scheme. If disableSSL is true, HTTP will be used instead of the default HTTPS.
|
||||
//
|
||||
// If disableSSL is set, it will only set the URL's scheme if the URL does not
|
||||
// contain a scheme.
|
||||
func AddScheme(endpoint string, disableSSL bool) string {
|
||||
if !schemeRE.MatchString(endpoint) {
|
||||
scheme := "https"
|
||||
if disableSSL {
|
||||
scheme = "http"
|
||||
}
|
||||
endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
|
||||
}
|
||||
|
||||
return endpoint
|
||||
}
|
||||
|
||||
// EnumPartitions provides a way to retrieve the underlying partitions that
|
||||
// make up the SDK's default Resolver, or any resolver decoded from a model
|
||||
// file.
|
||||
//
|
||||
// Use this interface with DefaultResolver and DecodeModels to get the list of
|
||||
// Partitions.
|
||||
type EnumPartitions interface {
|
||||
Partitions() []Partition
|
||||
}
|
||||
|
||||
// RegionsForService returns a map of regions for the partition and service.
|
||||
// If either the partition or service does not exist false will be returned
|
||||
// as the second parameter.
|
||||
//
|
||||
// This example shows how to get the regions for DynamoDB in the AWS partition.
|
||||
// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID)
|
||||
//
|
||||
// This is equivalent to using the partition directly.
|
||||
// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions()
|
||||
func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) {
|
||||
for _, p := range ps {
|
||||
if p.ID() != partitionID {
|
||||
continue
|
||||
}
|
||||
if _, ok := p.p.Services[serviceID]; !ok {
|
||||
break
|
||||
}
|
||||
|
||||
s := Service{
|
||||
id: serviceID,
|
||||
p: p.p,
|
||||
}
|
||||
return s.Regions(), true
|
||||
}
|
||||
|
||||
return map[string]Region{}, false
|
||||
}
|
||||
|
||||
// PartitionForRegion returns the first partition which includes the region
|
||||
// passed in. This includes both known regions and regions which match
|
||||
// a pattern supported by the partition which may include regions that are
|
||||
// not explicitly known by the partition. Use the Regions method of the
|
||||
// returned Partition if explicit support is needed.
|
||||
func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) {
|
||||
for _, p := range ps {
|
||||
if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) {
|
||||
return p, true
|
||||
}
|
||||
}
|
||||
|
||||
return Partition{}, false
|
||||
}
|
||||
|
||||
// A Partition provides the ability to enumerate the partition's regions
|
||||
// and services.
|
||||
type Partition struct {
|
||||
id string
|
||||
p *partition
|
||||
}
|
||||
|
||||
// ID returns the identifier of the partition.
|
||||
func (p Partition) ID() string { return p.id }
|
||||
|
||||
// EndpointFor attempts to resolve the endpoint based on service and region.
|
||||
// See Options for information on configuring how the endpoint is resolved.
|
||||
//
|
||||
// If the service cannot be found in the metadata the UnknownServiceError
|
||||
// error will be returned. This validation will occur regardless of whether
// StrictMatching is enabled. To enable resolving unknown services, set the
// "ResolveUnknownService" option to true. When StrictMatching is disabled,
// this option allows the partition resolver to resolve an endpoint based on
|
||||
// the service endpoint ID provided.
|
||||
//
|
||||
// When resolving endpoints you can choose to enable StrictMatching. This will
|
||||
// require the provided service and region to be known by the partition.
|
||||
// If the endpoint cannot be strictly resolved an error will be returned. This
|
||||
// mode is useful to ensure the endpoint resolved is valid. Without
|
||||
// StrictMatching enabled, the endpoint returned may look valid but may not work.
|
||||
// StrictMatching requires the SDK to be updated if you want to take advantage
|
||||
// of new regions and services expansions.
|
||||
//
|
||||
// Errors that can be returned.
|
||||
// * UnknownServiceError
|
||||
// * UnknownEndpointError
|
||||
func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
|
||||
return p.p.EndpointFor(service, region, opts...)
|
||||
}
|
||||
|
||||
// Regions returns a map of Regions indexed by their ID. This is useful for
|
||||
// enumerating over the regions in a partition.
|
||||
func (p Partition) Regions() map[string]Region {
|
||||
rs := map[string]Region{}
|
||||
for id, r := range p.p.Regions {
|
||||
rs[id] = Region{
|
||||
id: id,
|
||||
desc: r.Description,
|
||||
p: p.p,
|
||||
}
|
||||
}
|
||||
|
||||
return rs
|
||||
}
|
||||
|
||||
// Services returns a map of Service indexed by their ID. This is useful for
|
||||
// enumerating over the services in a partition.
|
||||
func (p Partition) Services() map[string]Service {
|
||||
ss := map[string]Service{}
|
||||
for id := range p.p.Services {
|
||||
ss[id] = Service{
|
||||
id: id,
|
||||
p: p.p,
|
||||
}
|
||||
}
|
||||
|
||||
return ss
|
||||
}
|
||||
|
||||
// A Region provides information about a region, and ability to resolve an
|
||||
// endpoint from the context of a region, given a service.
|
||||
type Region struct {
|
||||
id, desc string
|
||||
p *partition
|
||||
}
|
||||
|
||||
// ID returns the region's identifier.
|
||||
func (r Region) ID() string { return r.id }
|
||||
|
||||
// Description returns the region's description. The region description
|
||||
// is free text, it can be empty, and it may change between SDK releases.
|
||||
func (r Region) Description() string { return r.desc }
|
||||
|
||||
// ResolveEndpoint resolves an endpoint from the context of the region given
|
||||
// a service. See Partition.EndpointFor for usage and errors that can be returned.
|
||||
func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) {
|
||||
return r.p.EndpointFor(service, r.id, opts...)
|
||||
}
|
||||
|
||||
// Services returns a list of all services that are known to be in this region.
|
||||
func (r Region) Services() map[string]Service {
|
||||
ss := map[string]Service{}
|
||||
for id, s := range r.p.Services {
|
||||
if _, ok := s.Endpoints[r.id]; ok {
|
||||
ss[id] = Service{
|
||||
id: id,
|
||||
p: r.p,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ss
|
||||
}
|
||||
|
||||
// A Service provides information about a service, and ability to resolve an
|
||||
// endpoint from the context of a service, given a region.
|
||||
type Service struct {
|
||||
id string
|
||||
p *partition
|
||||
}
|
||||
|
||||
// ID returns the identifier for the service.
|
||||
func (s Service) ID() string { return s.id }
|
||||
|
||||
// ResolveEndpoint resolves an endpoint from the context of a service given
|
||||
// a region. See Partition.EndpointFor for usage and errors that can be returned.
|
||||
func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
|
||||
return s.p.EndpointFor(s.id, region, opts...)
|
||||
}
|
||||
|
||||
// Regions returns a map of Regions that the service is present in.
|
||||
//
|
||||
// A region is the AWS region the service exists in, whereas an Endpoint is
// a URL that can be resolved to an instance of a service.
|
||||
func (s Service) Regions() map[string]Region {
|
||||
rs := map[string]Region{}
|
||||
for id := range s.p.Services[s.id].Endpoints {
|
||||
if r, ok := s.p.Regions[id]; ok {
|
||||
rs[id] = Region{
|
||||
id: id,
|
||||
desc: r.Description,
|
||||
p: s.p,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return rs
|
||||
}
|
||||
|
||||
// Endpoints returns a map of Endpoints indexed by their ID for all known
|
||||
// endpoints for a service.
|
||||
//
|
||||
// A region is the AWS region the service exists in, whereas an Endpoint is
// a URL that can be resolved to an instance of a service.
|
||||
func (s Service) Endpoints() map[string]Endpoint {
|
||||
es := map[string]Endpoint{}
|
||||
for id := range s.p.Services[s.id].Endpoints {
|
||||
es[id] = Endpoint{
|
||||
id: id,
|
||||
serviceID: s.id,
|
||||
p: s.p,
|
||||
}
|
||||
}
|
||||
|
||||
return es
|
||||
}
|
||||
|
||||
// An Endpoint provides information about endpoints, and provides the ability
|
||||
// to resolve that endpoint for the service, and the region the endpoint
|
||||
// represents.
|
||||
type Endpoint struct {
|
||||
id string
|
||||
serviceID string
|
||||
p *partition
|
||||
}
|
||||
|
||||
// ID returns the identifier for an endpoint.
|
||||
func (e Endpoint) ID() string { return e.id }
|
||||
|
||||
// ServiceID returns the identifier the endpoint belongs to.
|
||||
func (e Endpoint) ServiceID() string { return e.serviceID }
|
||||
|
||||
// ResolveEndpoint resolves an endpoint from the context of a service and
|
||||
// region the endpoint represents. See Partition.EndpointFor for usage and
|
||||
// errors that can be returned.
|
||||
func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) {
|
||||
return e.p.EndpointFor(e.serviceID, e.id, opts...)
|
||||
}
|
||||
|
||||
// A ResolvedEndpoint is an endpoint that has been resolved based on a partition
|
||||
// service, and region.
|
||||
type ResolvedEndpoint struct {
|
||||
// The endpoint URL
|
||||
URL string
|
||||
|
||||
// The region that should be used for signing requests.
|
||||
SigningRegion string
|
||||
|
||||
// The service name that should be used for signing requests.
|
||||
SigningName string
|
||||
|
||||
// States that the signing name for this endpoint was derived from metadata
|
||||
// passed in, but was not explicitly modeled.
|
||||
SigningNameDerived bool
|
||||
|
||||
// The signing method that should be used for signing requests.
|
||||
SigningMethod string
|
||||
}
|
||||
|
||||
// So that the Error interface type can be included as an anonymous field
|
||||
// in the requestError struct and not conflict with the error.Error() method.
|
||||
type awsError awserr.Error
|
||||
|
||||
// A EndpointNotFoundError is returned when in StrictMatching mode, and the
|
||||
// endpoint for the service and region cannot be found in any of the partitions.
|
||||
type EndpointNotFoundError struct {
|
||||
awsError
|
||||
Partition string
|
||||
Service string
|
||||
Region string
|
||||
}
|
||||
|
||||
// A UnknownServiceError is returned when the service does not resolve to an
|
||||
// endpoint. Includes a list of all known services for the partition. Returned
|
||||
// when a partition does not support the service.
|
||||
type UnknownServiceError struct {
|
||||
awsError
|
||||
Partition string
|
||||
Service string
|
||||
Known []string
|
||||
}
|
||||
|
||||
// NewUnknownServiceError builds and returns UnknownServiceError.
|
||||
func NewUnknownServiceError(p, s string, known []string) UnknownServiceError {
|
||||
return UnknownServiceError{
|
||||
awsError: awserr.New("UnknownServiceError",
|
||||
"could not resolve endpoint for unknown service", nil),
|
||||
Partition: p,
|
||||
Service: s,
|
||||
Known: known,
|
||||
}
|
||||
}
|
||||
|
||||
// String returns the string representation of the error.
|
||||
func (e UnknownServiceError) Error() string {
|
||||
extra := fmt.Sprintf("partition: %q, service: %q",
|
||||
e.Partition, e.Service)
|
||||
if len(e.Known) > 0 {
|
||||
extra += fmt.Sprintf(", known: %v", e.Known)
|
||||
}
|
||||
return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
|
||||
}
|
||||
|
||||
// String returns the string representation of the error.
|
||||
func (e UnknownServiceError) String() string {
|
||||
return e.Error()
|
||||
}
|
||||
|
||||
// A UnknownEndpointError is returned when in StrictMatching mode and the
|
||||
// service is valid, but the region does not resolve to an endpoint. Includes
|
||||
// a list of all known endpoints for the service.
|
||||
type UnknownEndpointError struct {
|
||||
awsError
|
||||
Partition string
|
||||
Service string
|
||||
Region string
|
||||
Known []string
|
||||
}
|
||||
|
||||
// NewUnknownEndpointError builds and returns UnknownEndpointError.
|
||||
func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError {
|
||||
return UnknownEndpointError{
|
||||
awsError: awserr.New("UnknownEndpointError",
|
||||
"could not resolve endpoint", nil),
|
||||
Partition: p,
|
||||
Service: s,
|
||||
Region: r,
|
||||
Known: known,
|
||||
}
|
||||
}
|
||||
|
||||
// String returns the string representation of the error.
|
||||
func (e UnknownEndpointError) Error() string {
|
||||
extra := fmt.Sprintf("partition: %q, service: %q, region: %q",
|
||||
e.Partition, e.Service, e.Region)
|
||||
if len(e.Known) > 0 {
|
||||
extra += fmt.Sprintf(", known: %v", e.Known)
|
||||
}
|
||||
return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
|
||||
}
|
||||
|
||||
// String returns the string representation of the error.
|
||||
func (e UnknownEndpointError) String() string {
|
||||
return e.Error()
|
||||
}
|
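The Region, Service, and Endpoint types above form the public enumeration API of the endpoints package. A minimal usage sketch, assuming the DefaultPartitions/DefaultResolver entry points generated later in this diff; the literal "s3" and "us-west-2" identifiers are used purely for illustration:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// Walk every partition bundled with the SDK and report which regions
	// model an "s3" endpoint. Region.Services only includes services that
	// have at least one endpoint in that region.
	for _, p := range endpoints.DefaultPartitions() {
		for regionID, r := range p.Regions() {
			if _, ok := r.Services()["s3"]; ok {
				fmt.Println(p.ID(), regionID, "models an s3 endpoint")
			}
		}
	}

	// Resolve a single endpoint directly through the default resolver.
	re, err := endpoints.DefaultResolver().EndpointFor("s3", "us-west-2")
	if err == nil {
		fmt.Println(re.URL, re.SigningRegion)
	}
}
```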
307 src/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go (generated, vendored, new file)
@ -0,0 +1,307 @@
package endpoints

import (
	"fmt"
	"regexp"
	"strconv"
	"strings"
)

type partitions []partition

func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
	var opt Options
	opt.Set(opts...)

	for i := 0; i < len(ps); i++ {
		if !ps[i].canResolveEndpoint(service, region, opt.StrictMatching) {
			continue
		}

		return ps[i].EndpointFor(service, region, opts...)
	}

	// If loose matching fallback to first partition format to use
	// when resolving the endpoint.
	if !opt.StrictMatching && len(ps) > 0 {
		return ps[0].EndpointFor(service, region, opts...)
	}

	return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{})
}

// Partitions satisfies the EnumPartitions interface and returns a list
// of Partitions representing each partition represented in the SDK's
// endpoints model.
func (ps partitions) Partitions() []Partition {
	parts := make([]Partition, 0, len(ps))
	for i := 0; i < len(ps); i++ {
		parts = append(parts, ps[i].Partition())
	}

	return parts
}

type partition struct {
	ID          string      `json:"partition"`
	Name        string      `json:"partitionName"`
	DNSSuffix   string      `json:"dnsSuffix"`
	RegionRegex regionRegex `json:"regionRegex"`
	Defaults    endpoint    `json:"defaults"`
	Regions     regions     `json:"regions"`
	Services    services    `json:"services"`
}

func (p partition) Partition() Partition {
	return Partition{
		id: p.ID,
		p:  &p,
	}
}

func (p partition) canResolveEndpoint(service, region string, strictMatch bool) bool {
	s, hasService := p.Services[service]
	_, hasEndpoint := s.Endpoints[region]

	if hasEndpoint && hasService {
		return true
	}

	if strictMatch {
		return false
	}

	return p.RegionRegex.MatchString(region)
}

func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) {
	var opt Options
	opt.Set(opts...)

	s, hasService := p.Services[service]
	if !(hasService || opt.ResolveUnknownService) {
		// Only return error if the resolver will not fallback to creating
		// endpoint based on service endpoint ID passed in.
		return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services))
	}

	e, hasEndpoint := s.endpointForRegion(region)
	if !hasEndpoint && opt.StrictMatching {
		return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints))
	}

	defs := []endpoint{p.Defaults, s.Defaults}
	return e.resolve(service, region, p.DNSSuffix, defs, opt), nil
}

func serviceList(ss services) []string {
	list := make([]string, 0, len(ss))
	for k := range ss {
		list = append(list, k)
	}
	return list
}
func endpointList(es endpoints) []string {
	list := make([]string, 0, len(es))
	for k := range es {
		list = append(list, k)
	}
	return list
}

type regionRegex struct {
	*regexp.Regexp
}

func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) {
	// Strip leading and trailing quotes
	regex, err := strconv.Unquote(string(b))
	if err != nil {
		return fmt.Errorf("unable to strip quotes from regex, %v", err)
	}

	rr.Regexp, err = regexp.Compile(regex)
	if err != nil {
		return fmt.Errorf("unable to unmarshal region regex, %v", err)
	}
	return nil
}

type regions map[string]region

type region struct {
	Description string `json:"description"`
}

type services map[string]service

type service struct {
	PartitionEndpoint string    `json:"partitionEndpoint"`
	IsRegionalized    boxedBool `json:"isRegionalized,omitempty"`
	Defaults          endpoint  `json:"defaults"`
	Endpoints         endpoints `json:"endpoints"`
}

func (s *service) endpointForRegion(region string) (endpoint, bool) {
	if s.IsRegionalized == boxedFalse {
		return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint
	}

	if e, ok := s.Endpoints[region]; ok {
		return e, true
	}

	// Unable to find any matching endpoint, return
	// blank that will be used for generic endpoint creation.
	return endpoint{}, false
}

type endpoints map[string]endpoint

type endpoint struct {
	Hostname        string          `json:"hostname"`
	Protocols       []string        `json:"protocols"`
	CredentialScope credentialScope `json:"credentialScope"`

	// Custom fields not modeled
	HasDualStack      boxedBool `json:"-"`
	DualStackHostname string    `json:"-"`

	// Signature Version not used
	SignatureVersions []string `json:"signatureVersions"`

	// SSLCommonName not used.
	SSLCommonName string `json:"sslCommonName"`
}

const (
	defaultProtocol = "https"
	defaultSigner   = "v4"
)

var (
	protocolPriority = []string{"https", "http"}
	signerPriority   = []string{"v4", "v2"}
)

func getByPriority(s []string, p []string, def string) string {
	if len(s) == 0 {
		return def
	}

	for i := 0; i < len(p); i++ {
		for j := 0; j < len(s); j++ {
			if s[j] == p[i] {
				return s[j]
			}
		}
	}

	return s[0]
}

func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint {
	var merged endpoint
	for _, def := range defs {
		merged.mergeIn(def)
	}
	merged.mergeIn(e)
	e = merged

	hostname := e.Hostname

	// Offset the hostname for dualstack if enabled
	if opts.UseDualStack && e.HasDualStack == boxedTrue {
		hostname = e.DualStackHostname
	}

	u := strings.Replace(hostname, "{service}", service, 1)
	u = strings.Replace(u, "{region}", region, 1)
	u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1)

	scheme := getEndpointScheme(e.Protocols, opts.DisableSSL)
	u = fmt.Sprintf("%s://%s", scheme, u)

	signingRegion := e.CredentialScope.Region
	if len(signingRegion) == 0 {
		signingRegion = region
	}

	signingName := e.CredentialScope.Service
	var signingNameDerived bool
	if len(signingName) == 0 {
		signingName = service
		signingNameDerived = true
	}

	return ResolvedEndpoint{
		URL:                u,
		SigningRegion:      signingRegion,
		SigningName:        signingName,
		SigningNameDerived: signingNameDerived,
		SigningMethod:      getByPriority(e.SignatureVersions, signerPriority, defaultSigner),
	}
}

func getEndpointScheme(protocols []string, disableSSL bool) string {
	if disableSSL {
		return "http"
	}

	return getByPriority(protocols, protocolPriority, defaultProtocol)
}

func (e *endpoint) mergeIn(other endpoint) {
	if len(other.Hostname) > 0 {
		e.Hostname = other.Hostname
	}
	if len(other.Protocols) > 0 {
		e.Protocols = other.Protocols
	}
	if len(other.SignatureVersions) > 0 {
		e.SignatureVersions = other.SignatureVersions
	}
	if len(other.CredentialScope.Region) > 0 {
		e.CredentialScope.Region = other.CredentialScope.Region
	}
	if len(other.CredentialScope.Service) > 0 {
		e.CredentialScope.Service = other.CredentialScope.Service
	}
	if len(other.SSLCommonName) > 0 {
		e.SSLCommonName = other.SSLCommonName
	}
	if other.HasDualStack != boxedBoolUnset {
		e.HasDualStack = other.HasDualStack
	}
	if len(other.DualStackHostname) > 0 {
		e.DualStackHostname = other.DualStackHostname
	}
}

type credentialScope struct {
	Region  string `json:"region"`
	Service string `json:"service"`
}

type boxedBool int

func (b *boxedBool) UnmarshalJSON(buf []byte) error {
	v, err := strconv.ParseBool(string(buf))
	if err != nil {
		return err
	}

	if v {
		*b = boxedTrue
	} else {
		*b = boxedFalse
	}

	return nil
}

const (
	boxedBoolUnset boxedBool = iota
	boxedFalse
	boxedTrue
)
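The heart of v3model.go is endpoint.resolve, which merges partition and service defaults and then expands the modeled hostname template. A self-contained sketch of that expansion; the hostname pattern, service, and region values here are illustrative placeholders, not values read from the real model:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Same substitution resolve performs on a modeled hostname template.
	hostname := "{service}.{region}.{dnsSuffix}"
	u := strings.Replace(hostname, "{service}", "ecr", 1)
	u = strings.Replace(u, "{region}", "eu-west-1", 1)
	u = strings.Replace(u, "{dnsSuffix}", "amazonaws.com", 1)

	// The scheme comes from the endpoint's protocol list (https preferred).
	fmt.Println("https://" + u) // https://ecr.eu-west-1.amazonaws.com
}
```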
351 src/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go (generated, vendored, new file)
@ -0,0 +1,351 @@
// +build codegen

package endpoints

import (
	"fmt"
	"io"
	"reflect"
	"strings"
	"text/template"
	"unicode"
)

// A CodeGenOptions are the options for code generating the endpoints into
// Go code from the endpoints model definition.
type CodeGenOptions struct {
	// Options for how the model will be decoded.
	DecodeModelOptions DecodeModelOptions

	// Disables code generation of the service endpoint prefix IDs defined in
	// the model.
	DisableGenerateServiceIDs bool
}

// Set combines all of the option functions together
func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) {
	for _, fn := range optFns {
		fn(d)
	}
}

// CodeGenModel given a endpoints model file will decode it and attempt to
// generate Go code from the model definition. Error will be returned if
// the code is unable to be generated, or decoded.
func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error {
	var opts CodeGenOptions
	opts.Set(optFns...)

	resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) {
		*d = opts.DecodeModelOptions
	})
	if err != nil {
		return err
	}

	v := struct {
		Resolver
		CodeGenOptions
	}{
		Resolver:       resolver,
		CodeGenOptions: opts,
	}

	tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl))
	if err := tmpl.ExecuteTemplate(outFile, "defaults", v); err != nil {
		return fmt.Errorf("failed to execute template, %v", err)
	}

	return nil
}

func toSymbol(v string) string {
	out := []rune{}
	for _, c := range strings.Title(v) {
		if !(unicode.IsNumber(c) || unicode.IsLetter(c)) {
			continue
		}

		out = append(out, c)
	}

	return string(out)
}

func quoteString(v string) string {
	return fmt.Sprintf("%q", v)
}

func regionConstName(p, r string) string {
	return toSymbol(p) + toSymbol(r)
}

func partitionGetter(id string) string {
	return fmt.Sprintf("%sPartition", toSymbol(id))
}

func partitionVarName(id string) string {
	return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id)))
}

func listPartitionNames(ps partitions) string {
	names := []string{}
	switch len(ps) {
	case 1:
		return ps[0].Name
	case 2:
		return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name)
	default:
		for i, p := range ps {
			if i == len(ps)-1 {
				names = append(names, "and "+p.Name)
			} else {
				names = append(names, p.Name)
			}
		}
		return strings.Join(names, ", ")
	}
}

func boxedBoolIfSet(msg string, v boxedBool) string {
	switch v {
	case boxedTrue:
		return fmt.Sprintf(msg, "boxedTrue")
	case boxedFalse:
		return fmt.Sprintf(msg, "boxedFalse")
	default:
		return ""
	}
}

func stringIfSet(msg, v string) string {
	if len(v) == 0 {
		return ""
	}

	return fmt.Sprintf(msg, v)
}

func stringSliceIfSet(msg string, vs []string) string {
	if len(vs) == 0 {
		return ""
	}

	names := []string{}
	for _, v := range vs {
		names = append(names, `"`+v+`"`)
	}

	return fmt.Sprintf(msg, strings.Join(names, ","))
}

func endpointIsSet(v endpoint) bool {
	return !reflect.DeepEqual(v, endpoint{})
}

func serviceSet(ps partitions) map[string]struct{} {
	set := map[string]struct{}{}
	for _, p := range ps {
		for id := range p.Services {
			set[id] = struct{}{}
		}
	}

	return set
}

var funcMap = template.FuncMap{
	"ToSymbol":           toSymbol,
	"QuoteString":        quoteString,
	"RegionConst":        regionConstName,
	"PartitionGetter":    partitionGetter,
	"PartitionVarName":   partitionVarName,
	"ListPartitionNames": listPartitionNames,
	"BoxedBoolIfSet":     boxedBoolIfSet,
	"StringIfSet":        stringIfSet,
	"StringSliceIfSet":   stringSliceIfSet,
	"EndpointIsSet":      endpointIsSet,
	"ServicesSet":        serviceSet,
}

const v3Tmpl = `
{{ define "defaults" -}}
// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT.

package endpoints

import (
	"regexp"
)

{{ template "partition consts" $.Resolver }}

{{ range $_, $partition := $.Resolver }}
{{ template "partition region consts" $partition }}
{{ end }}

{{ if not $.DisableGenerateServiceIDs -}}
{{ template "service consts" $.Resolver }}
{{- end }}

{{ template "endpoint resolvers" $.Resolver }}
{{- end }}

{{ define "partition consts" }}
// Partition identifiers
const (
	{{ range $_, $p := . -}}
	{{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition.
	{{ end -}}
)
{{- end }}

{{ define "partition region consts" }}
// {{ .Name }} partition's regions.
const (
	{{ range $id, $region := .Regions -}}
	{{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}.
	{{ end -}}
)
{{- end }}

{{ define "service consts" }}
// Service identifiers
const (
	{{ $serviceSet := ServicesSet . -}}
	{{ range $id, $_ := $serviceSet -}}
	{{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}.
	{{ end -}}
)
{{- end }}

{{ define "endpoint resolvers" }}
// DefaultResolver returns an Endpoint resolver that will be able
// to resolve endpoints for: {{ ListPartitionNames . }}.
//
// Use DefaultPartitions() to get the list of the default partitions.
func DefaultResolver() Resolver {
	return defaultPartitions
}

// DefaultPartitions returns a list of the partitions the SDK is bundled
// with. The available partitions are: {{ ListPartitionNames . }}.
//
//    partitions := endpoints.DefaultPartitions
//    for _, p := range partitions {
//        // ... inspect partitions
//    }
func DefaultPartitions() []Partition {
	return defaultPartitions.Partitions()
}

var defaultPartitions = partitions{
	{{ range $_, $partition := . -}}
	{{ PartitionVarName $partition.ID }},
	{{ end }}
}

{{ range $_, $partition := . -}}
{{ $name := PartitionGetter $partition.ID -}}
// {{ $name }} returns the Resolver for {{ $partition.Name }}.
func {{ $name }}() Partition {
	return {{ PartitionVarName $partition.ID }}.Partition()
}
var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }}
{{ end }}
{{ end }}

{{ define "default partitions" }}
func DefaultPartitions() []Partition {
	return []partition{
		{{ range $_, $partition := . -}}
		// {{ ToSymbol $partition.ID}}Partition(),
		{{ end }}
	}
}
{{ end }}

{{ define "gocode Partition" -}}
partition{
	{{ StringIfSet "ID: %q,\n" .ID -}}
	{{ StringIfSet "Name: %q,\n" .Name -}}
	{{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}}
	RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }},
	{{ if EndpointIsSet .Defaults -}}
	Defaults: {{ template "gocode Endpoint" .Defaults }},
	{{- end }}
	Regions: {{ template "gocode Regions" .Regions }},
	Services: {{ template "gocode Services" .Services }},
}
{{- end }}

{{ define "gocode RegionRegex" -}}
regionRegex{
	Regexp: func() *regexp.Regexp{
		reg, _ := regexp.Compile({{ QuoteString .Regexp.String }})
		return reg
	}(),
}
{{- end }}

{{ define "gocode Regions" -}}
regions{
	{{ range $id, $region := . -}}
	"{{ $id }}": {{ template "gocode Region" $region }},
	{{ end -}}
}
{{- end }}

{{ define "gocode Region" -}}
region{
	{{ StringIfSet "Description: %q,\n" .Description -}}
}
{{- end }}

{{ define "gocode Services" -}}
services{
	{{ range $id, $service := . -}}
	"{{ $id }}": {{ template "gocode Service" $service }},
	{{ end }}
}
{{- end }}

{{ define "gocode Service" -}}
service{
	{{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}}
	{{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}}
	{{ if EndpointIsSet .Defaults -}}
	Defaults: {{ template "gocode Endpoint" .Defaults -}},
	{{- end }}
	{{ if .Endpoints -}}
	Endpoints: {{ template "gocode Endpoints" .Endpoints }},
	{{- end }}
}
{{- end }}

{{ define "gocode Endpoints" -}}
endpoints{
	{{ range $id, $endpoint := . -}}
	"{{ $id }}": {{ template "gocode Endpoint" $endpoint }},
	{{ end }}
}
{{- end }}

{{ define "gocode Endpoint" -}}
endpoint{
	{{ StringIfSet "Hostname: %q,\n" .Hostname -}}
	{{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}}
	{{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}}
	{{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}}
	{{ if or .CredentialScope.Region .CredentialScope.Service -}}
	CredentialScope: credentialScope{
		{{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}}
		{{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}}
	},
	{{- end }}
	{{ BoxedBoolIfSet "HasDualStack: %s,\n" .HasDualStack -}}
	{{ StringIfSet "DualStackHostname: %q,\n" .DualStackHostname -}}

}
{{- end }}
`
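CodeGenModel is only compiled when the `codegen` build tag is set, and it renders the decoded endpoints model through v3Tmpl into Go source. A hedged sketch of driving it from a small generator program, assuming it is built with `go build -tags codegen`; the endpoints.json and defaults.go paths are placeholders for illustration:

```go
package main

import (
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// endpoints.json is the SDK's endpoint model; defaults.go is the file
	// the template renders. Both file names are assumptions for this sketch.
	in, err := os.Open("endpoints.json")
	if err != nil {
		log.Fatal(err)
	}
	defer in.Close()

	out, err := os.Create("defaults.go")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	// CodeGenModel decodes the model and executes the "defaults" template.
	if err := endpoints.CodeGenModel(in, out); err != nil {
		log.Fatal(err)
	}
}
```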
13 src/vendor/github.com/aws/aws-sdk-go/aws/errors.go (generated, vendored, new file)
@ -0,0 +1,13 @@
package aws

import "github.com/aws/aws-sdk-go/aws/awserr"

var (
	// ErrMissingRegion is an error that is returned if region configuration is
	// not found.
	ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil)

	// ErrMissingEndpoint is an error that is returned if an endpoint cannot be
	// resolved for a service.
	ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
)
12 src/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go (generated, vendored, new file)
@ -0,0 +1,12 @@
package aws

// JSONValue is a representation of a grab bag type that will be marshaled
// into a json string. This type can be used just like any other map.
//
// Example:
//
//     values := aws.JSONValue{
//         "Foo": "Bar",
//     }
//     values["Baz"] = "Qux"
type JSONValue map[string]interface{}
118 src/vendor/github.com/aws/aws-sdk-go/aws/logger.go (generated, vendored, new file)
@ -0,0 +1,118 @@
package aws

import (
	"log"
	"os"
)

// A LogLevelType defines the level logging should be performed at. Used to instruct
// the SDK which statements should be logged.
type LogLevelType uint

// LogLevel returns the pointer to a LogLevel. Should be used to workaround
// not being able to take the address of a non-composite literal.
func LogLevel(l LogLevelType) *LogLevelType {
	return &l
}

// Value returns the LogLevel value or the default value LogOff if the LogLevel
// is nil. Safe to use on nil value LogLevelTypes.
func (l *LogLevelType) Value() LogLevelType {
	if l != nil {
		return *l
	}
	return LogOff
}

// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If
// LogLevel is nil, will default to LogOff comparison.
func (l *LogLevelType) Matches(v LogLevelType) bool {
	c := l.Value()
	return c&v == v
}

// AtLeast returns true if this LogLevel is at least high enough to satisfies v.
// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default
// to LogOff comparison.
func (l *LogLevelType) AtLeast(v LogLevelType) bool {
	c := l.Value()
	return c >= v
}

const (
	// LogOff states that no logging should be performed by the SDK. This is the
	// default state of the SDK, and should be use to disable all logging.
	LogOff LogLevelType = iota * 0x1000

	// LogDebug state that debug output should be logged by the SDK. This should
	// be used to inspect request made and responses received.
	LogDebug
)

// Debug Logging Sub Levels
const (
	// LogDebugWithSigning states that the SDK should log request signing and
	// presigning events. This should be used to log the signing details of
	// requests for debugging. Will also enable LogDebug.
	LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)

	// LogDebugWithHTTPBody states the SDK should log HTTP request and response
	// HTTP bodys in addition to the headers and path. This should be used to
	// see the body content of requests and responses made while using the SDK
	// Will also enable LogDebug.
	LogDebugWithHTTPBody

	// LogDebugWithRequestRetries states the SDK should log when service requests will
	// be retried. This should be used to log when you want to log when service
	// requests are being retried. Will also enable LogDebug.
	LogDebugWithRequestRetries

	// LogDebugWithRequestErrors states the SDK should log when service requests fail
	// to build, send, validate, or unmarshal.
	LogDebugWithRequestErrors

	// LogDebugWithEventStreamBody states the SDK should log EventStream
	// request and response bodys. This should be used to log the EventStream
	// wire unmarshaled message content of requests and responses made while
	// using the SDK Will also enable LogDebug.
	LogDebugWithEventStreamBody
)

// A Logger is a minimalistic interface for the SDK to log messages to. Should
// be used to provide custom logging writers for the SDK to use.
type Logger interface {
	Log(...interface{})
}

// A LoggerFunc is a convenience type to convert a function taking a variadic
// list of arguments and wrap it so the Logger interface can be used.
//
// Example:
//     s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
//         fmt.Fprintln(os.Stdout, args...)
//     })})
type LoggerFunc func(...interface{})

// Log calls the wrapped function with the arguments provided
func (f LoggerFunc) Log(args ...interface{}) {
	f(args...)
}

// NewDefaultLogger returns a Logger which will write log messages to stdout, and
// use same formatting runes as the stdlib log.Logger
func NewDefaultLogger() Logger {
	return &defaultLogger{
		logger: log.New(os.Stdout, "", log.LstdFlags),
	}
}

// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
type defaultLogger struct {
	logger *log.Logger
}

// Log logs the parameters to the stdlib logger. See log.Println.
func (l defaultLogger) Log(args ...interface{}) {
	l.logger.Println(args...)
}
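A short usage sketch for the logging hooks above: wiring a custom LoggerFunc and a debug log level into an aws.Config so SDK request/response traffic is routed through the standard library logger. The session setup is a minimal assumption-level example, not taken from Harbor's own adapter code:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Route SDK debug output, including HTTP bodies, through log.Println.
	cfg := &aws.Config{
		LogLevel: aws.LogLevel(aws.LogDebug | aws.LogDebugWithHTTPBody),
		Logger: aws.LoggerFunc(func(args ...interface{}) {
			log.Println(args...)
		}),
	}

	sess := session.Must(session.NewSession(cfg))
	_ = sess // pass sess to a service client, e.g. ecr.New(sess)
}
```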
19 src/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go (generated, vendored, new file)
@ -0,0 +1,19 @@
// +build !appengine,!plan9

package request

import (
	"net"
	"os"
	"syscall"
)

func isErrConnectionReset(err error) bool {
	if opErr, ok := err.(*net.OpError); ok {
		if sysErr, ok := opErr.Err.(*os.SyscallError); ok {
			return sysErr.Err == syscall.ECONNRESET
		}
	}

	return false
}
11 src/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go (generated, vendored, new file)
@ -0,0 +1,11 @@
// +build appengine plan9

package request

import (
	"strings"
)

func isErrConnectionReset(err error) bool {
	return strings.Contains(err.Error(), "connection reset")
}
277 src/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go (generated, vendored, new file)
@ -0,0 +1,277 @@
package request

import (
	"fmt"
	"strings"
)

// A Handlers provides a collection of request handlers for various
// stages of handling requests.
type Handlers struct {
	Validate         HandlerList
	Build            HandlerList
	Sign             HandlerList
	Send             HandlerList
	ValidateResponse HandlerList
	Unmarshal        HandlerList
	UnmarshalStream  HandlerList
	UnmarshalMeta    HandlerList
	UnmarshalError   HandlerList
	Retry            HandlerList
	AfterRetry       HandlerList
	CompleteAttempt  HandlerList
	Complete         HandlerList
}

// Copy returns of this handler's lists.
func (h *Handlers) Copy() Handlers {
	return Handlers{
		Validate:         h.Validate.copy(),
		Build:            h.Build.copy(),
		Sign:             h.Sign.copy(),
		Send:             h.Send.copy(),
		ValidateResponse: h.ValidateResponse.copy(),
		Unmarshal:        h.Unmarshal.copy(),
		UnmarshalStream:  h.UnmarshalStream.copy(),
		UnmarshalError:   h.UnmarshalError.copy(),
		UnmarshalMeta:    h.UnmarshalMeta.copy(),
		Retry:            h.Retry.copy(),
		AfterRetry:       h.AfterRetry.copy(),
		CompleteAttempt:  h.CompleteAttempt.copy(),
		Complete:         h.Complete.copy(),
	}
}

// Clear removes callback functions for all handlers
func (h *Handlers) Clear() {
	h.Validate.Clear()
	h.Build.Clear()
	h.Send.Clear()
	h.Sign.Clear()
	h.Unmarshal.Clear()
	h.UnmarshalStream.Clear()
	h.UnmarshalMeta.Clear()
	h.UnmarshalError.Clear()
	h.ValidateResponse.Clear()
	h.Retry.Clear()
	h.AfterRetry.Clear()
	h.CompleteAttempt.Clear()
	h.Complete.Clear()
}

// A HandlerListRunItem represents an entry in the HandlerList which
// is being run.
type HandlerListRunItem struct {
	Index   int
	Handler NamedHandler
	Request *Request
}

// A HandlerList manages zero or more handlers in a list.
type HandlerList struct {
	list []NamedHandler

	// Called after each request handler in the list is called. If set
	// and the func returns true the HandlerList will continue to iterate
	// over the request handlers. If false is returned the HandlerList
	// will stop iterating.
	//
	// Should be used if extra logic to be performed between each handler
	// in the list. This can be used to terminate a list's iteration
	// based on a condition such as error like, HandlerListStopOnError.
	// Or for logging like HandlerListLogItem.
	AfterEachFn func(item HandlerListRunItem) bool
}

// A NamedHandler is a struct that contains a name and function callback.
type NamedHandler struct {
	Name string
	Fn   func(*Request)
}

// copy creates a copy of the handler list.
func (l *HandlerList) copy() HandlerList {
	n := HandlerList{
		AfterEachFn: l.AfterEachFn,
	}
	if len(l.list) == 0 {
		return n
	}

	n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...)
	return n
}

// Clear clears the handler list.
func (l *HandlerList) Clear() {
	l.list = l.list[0:0]
}

// Len returns the number of handlers in the list.
func (l *HandlerList) Len() int {
	return len(l.list)
}

// PushBack pushes handler f to the back of the handler list.
func (l *HandlerList) PushBack(f func(*Request)) {
	l.PushBackNamed(NamedHandler{"__anonymous", f})
}

// PushBackNamed pushes named handler f to the back of the handler list.
func (l *HandlerList) PushBackNamed(n NamedHandler) {
	if cap(l.list) == 0 {
		l.list = make([]NamedHandler, 0, 5)
	}
	l.list = append(l.list, n)
}

// PushFront pushes handler f to the front of the handler list.
func (l *HandlerList) PushFront(f func(*Request)) {
	l.PushFrontNamed(NamedHandler{"__anonymous", f})
}

// PushFrontNamed pushes named handler f to the front of the handler list.
func (l *HandlerList) PushFrontNamed(n NamedHandler) {
	if cap(l.list) == len(l.list) {
		// Allocating new list required
		l.list = append([]NamedHandler{n}, l.list...)
	} else {
		// Enough room to prepend into list.
		l.list = append(l.list, NamedHandler{})
		copy(l.list[1:], l.list)
		l.list[0] = n
	}
}

// Remove removes a NamedHandler n
func (l *HandlerList) Remove(n NamedHandler) {
	l.RemoveByName(n.Name)
}

// RemoveByName removes a NamedHandler by name.
func (l *HandlerList) RemoveByName(name string) {
	for i := 0; i < len(l.list); i++ {
		m := l.list[i]
		if m.Name == name {
			// Shift array preventing creating new arrays
			copy(l.list[i:], l.list[i+1:])
			l.list[len(l.list)-1] = NamedHandler{}
			l.list = l.list[:len(l.list)-1]

			// decrement list so next check to length is correct
			i--
		}
	}
}

// SwapNamed will swap out any existing handlers with the same name as the
// passed in NamedHandler returning true if handlers were swapped. False is
// returned otherwise.
func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) {
	for i := 0; i < len(l.list); i++ {
		if l.list[i].Name == n.Name {
			l.list[i].Fn = n.Fn
			swapped = true
		}
	}

	return swapped
}

// Swap will swap out all handlers matching the name passed in. The matched
// handlers will be swapped in. True is returned if the handlers were swapped.
func (l *HandlerList) Swap(name string, replace NamedHandler) bool {
	var swapped bool

	for i := 0; i < len(l.list); i++ {
		if l.list[i].Name == name {
			l.list[i] = replace
			swapped = true
		}
	}

	return swapped
}

// SetBackNamed will replace the named handler if it exists in the handler list.
// If the handler does not exist the handler will be added to the end of the list.
func (l *HandlerList) SetBackNamed(n NamedHandler) {
	if !l.SwapNamed(n) {
		l.PushBackNamed(n)
	}
}

// SetFrontNamed will replace the named handler if it exists in the handler list.
// If the handler does not exist the handler will be added to the beginning of
// the list.
func (l *HandlerList) SetFrontNamed(n NamedHandler) {
	if !l.SwapNamed(n) {
		l.PushFrontNamed(n)
	}
}

// Run executes all handlers in the list with a given request object.
func (l *HandlerList) Run(r *Request) {
	for i, h := range l.list {
		h.Fn(r)
		item := HandlerListRunItem{
			Index: i, Handler: h, Request: r,
		}
		if l.AfterEachFn != nil && !l.AfterEachFn(item) {
			return
		}
	}
}

// HandlerListLogItem logs the request handler and the state of the
// request's Error value. Always returns true to continue iterating
// request handlers in a HandlerList.
func HandlerListLogItem(item HandlerListRunItem) bool {
	if item.Request.Config.Logger == nil {
		return true
	}
	item.Request.Config.Logger.Log("DEBUG: RequestHandler",
		item.Index, item.Handler.Name, item.Request.Error)

	return true
}

// HandlerListStopOnError returns false to stop the HandlerList iterating
// over request handlers if Request.Error is not nil. True otherwise
// to continue iterating.
func HandlerListStopOnError(item HandlerListRunItem) bool {
	return item.Request.Error == nil
}

// WithAppendUserAgent will add a string to the user agent prefixed with a
// single white space.
func WithAppendUserAgent(s string) Option {
	return func(r *Request) {
		r.Handlers.Build.PushBack(func(r2 *Request) {
			AddToUserAgent(r, s)
		})
	}
}

// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request
// header. If the extra parameters are provided they will be added as metadata to the
// name/version pair resulting in the following format.
// "name/version (extra0; extra1; ...)"
// The user agent part will be concatenated with this current request's user agent string.
func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) {
	ua := fmt.Sprintf("%s/%s", name, version)
	if len(extra) > 0 {
		ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; "))
	}
	return func(r *Request) {
		AddToUserAgent(r, ua)
	}
}

// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header.
// The input string will be concatenated with the current request's user agent string.
func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) {
	return func(r *Request) {
		AddToUserAgent(r, s)
	}
}
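The Handlers/HandlerList machinery above is how SDK clients expose hooks into each request phase. A hedged sketch of attaching a named handler to a session so every completed API call is logged; the handler name is arbitrary, and the fields read from the request (ClientInfo.ServiceName, Operation.Name, Error) are assumed from the SDK's Request type rather than shown in this diff:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession())

	// Push a named handler onto the Complete list; the name lets it be
	// removed or swapped later via RemoveByName / SwapNamed.
	sess.Handlers.Complete.PushBackNamed(request.NamedHandler{
		Name: "example.CompleteLogger",
		Fn: func(r *request.Request) {
			fmt.Println("completed", r.ClientInfo.ServiceName, r.Operation.Name, "err:", r.Error)
		},
	})

	_ = sess // clients created from sess inherit a copy of these handlers
}
```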
24 src/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go (generated, vendored, new file)
@ -0,0 +1,24 @@
package request

import (
	"io"
	"net/http"
	"net/url"
)

func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request {
	req := new(http.Request)
	*req = *r
	req.URL = &url.URL{}
	*req.URL = *r.URL
	req.Body = body

	req.Header = http.Header{}
	for k, v := range r.Header {
		for _, vv := range v {
			req.Header.Add(k, vv)
		}
	}

	return req
}