From 714f989759f3231873d974d4a5d618c63fda3043 Mon Sep 17 00:00:00 2001 From: Alvaro Iradier Date: Tue, 5 May 2020 23:13:51 +0200 Subject: [PATCH 01/11] Add options for automatic onboarding and username claim - Add an option in the UI to enable or disable the automatic user onboarding - Add an option to specify the claim name where the username is retrieved from. Signed-off-by: Alvaro Iradier --- src/common/config/metadata/metadatalist.go | 2 + src/common/const.go | 2 + src/common/models/config.go | 2 + src/common/utils/oidc/helper.go | 31 +++- src/core/config/config.go | 2 + src/core/config/config_test.go | 4 + src/core/controllers/oidc.go | 161 +++++++++++------- .../config/auth/config-auth.component.html | 63 +++++-- src/portal/src/i18n/lang/en-us-lang.json | 4 + .../src/lib/components/config/config.ts | 4 + 10 files changed, 189 insertions(+), 86 deletions(-) diff --git a/src/common/config/metadata/metadatalist.go b/src/common/config/metadata/metadatalist.go index dfc635b43..cf3ccc62b 100644 --- a/src/common/config/metadata/metadatalist.go +++ b/src/common/config/metadata/metadatalist.go @@ -140,7 +140,9 @@ var ( {Name: common.OIDCClientSecret, Scope: UserScope, Group: OIDCGroup, ItemType: &PasswordType{}}, {Name: common.OIDCGroupsClaim, Scope: UserScope, Group: OIDCGroup, ItemType: &StringType{}}, {Name: common.OIDCScope, Scope: UserScope, Group: OIDCGroup, ItemType: &StringType{}}, + {Name: common.OIDCUserClaim, Scope: UserScope, Group: OIDCGroup, ItemType: &StringType{}}, {Name: common.OIDCVerifyCert, Scope: UserScope, Group: OIDCGroup, DefaultValue: "true", ItemType: &BoolType{}}, + {Name: common.OIDCAutoOnboard, Scope: UserScope, Group: OIDCGroup, DefaultValue: "false", ItemType: &BoolType{}}, {Name: common.WithChartMuseum, Scope: SystemScope, Group: BasicGroup, EnvKey: "WITH_CHARTMUSEUM", DefaultValue: "false", ItemType: &BoolType{}, Editable: true}, {Name: common.WithClair, Scope: SystemScope, Group: BasicGroup, EnvKey: "WITH_CLAIR", DefaultValue: "false", ItemType: &BoolType{}, Editable: true}, diff --git a/src/common/const.go b/src/common/const.go index 229cd7136..6fb7c68b2 100755 --- a/src/common/const.go +++ b/src/common/const.go @@ -106,7 +106,9 @@ const ( OIDCClientSecret = "oidc_client_secret" OIDCVerifyCert = "oidc_verify_cert" OIDCGroupsClaim = "oidc_groups_claim" + OIDCAutoOnboard = "oidc_auto_onboard" OIDCScope = "oidc_scope" + OIDCUserClaim = "oidc_user_claim" CfgDriverDB = "db" NewHarborAdminName = "admin@harbor.local" diff --git a/src/common/models/config.go b/src/common/models/config.go index e208bb4fd..806969fd6 100644 --- a/src/common/models/config.go +++ b/src/common/models/config.go @@ -81,11 +81,13 @@ type OIDCSetting struct { Name string `json:"name"` Endpoint string `json:"endpoint"` VerifyCert bool `json:"verify_cert"` + AutoOnboard bool `json:"auto_onboard"` ClientID string `json:"client_id"` ClientSecret string `json:"client_secret"` GroupsClaim string `json:"groups_claim"` RedirectURL string `json:"redirect_url"` Scope []string `json:"scope"` + UserClaim string `json:"user_claim"` } // QuotaSetting wraps the settings for Quota diff --git a/src/common/utils/oidc/helper.go b/src/common/utils/oidc/helper.go index 5a9a1f416..0ba24b8f1 100644 --- a/src/common/utils/oidc/helper.go +++ b/src/common/utils/oidc/helper.go @@ -19,16 +19,17 @@ import ( "crypto/tls" "errors" "fmt" - gooidc "github.com/coreos/go-oidc" - "github.com/goharbor/harbor/src/common/models" - "github.com/goharbor/harbor/src/core/config" - "github.com/goharbor/harbor/src/lib/log" - 
"golang.org/x/oauth2" "net/http" "strings" "sync" "sync/atomic" "time" + + gooidc "github.com/coreos/go-oidc" + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/core/config" + "github.com/goharbor/harbor/src/lib/log" + "golang.org/x/oauth2" ) const ( @@ -294,7 +295,7 @@ func userInfoFromRemote(ctx context.Context, token *Token, setting models.OIDCSe if err != nil { return nil, err } - return userInfoFromClaims(u, setting.GroupsClaim) + return userInfoFromClaims(u, setting.GroupsClaim, setting.UserClaim) } func userInfoFromIDToken(ctx context.Context, token *Token, setting models.OIDCSetting) (*UserInfo, error) { @@ -305,14 +306,28 @@ func userInfoFromIDToken(ctx context.Context, token *Token, setting models.OIDCS if err != nil { return nil, err } - return userInfoFromClaims(idt, setting.GroupsClaim) + + return userInfoFromClaims(idt, setting.GroupsClaim, setting.UserClaim) } -func userInfoFromClaims(c claimsProvider, g string) (*UserInfo, error) { +func userInfoFromClaims(c claimsProvider, g, u string) (*UserInfo, error) { res := &UserInfo{} if err := c.Claims(res); err != nil { return nil, err } + if u != "" { + allClaims := make(map[string]interface{}) + if err := c.Claims(&allClaims); err != nil { + return nil, err + } + + if username, ok := allClaims[u].(string); !ok { + log.Warningf("OIDC. Failed to recover Username from claim. Claim '%s' is empty", u) + } else { + res.Username = username + } + + } res.Groups, res.hasGroupClaim = GroupsFromClaims(c, g) return res, nil } diff --git a/src/core/config/config.go b/src/core/config/config.go index 4a24659eb..a67c91c52 100755 --- a/src/core/config/config.go +++ b/src/core/config/config.go @@ -440,11 +440,13 @@ func OIDCSetting() (*models.OIDCSetting, error) { Name: cfgMgr.Get(common.OIDCName).GetString(), Endpoint: cfgMgr.Get(common.OIDCEndpoint).GetString(), VerifyCert: cfgMgr.Get(common.OIDCVerifyCert).GetBool(), + AutoOnboard: cfgMgr.Get(common.OIDCAutoOnboard).GetBool(), ClientID: cfgMgr.Get(common.OIDCCLientID).GetString(), ClientSecret: cfgMgr.Get(common.OIDCClientSecret).GetString(), GroupsClaim: cfgMgr.Get(common.OIDCGroupsClaim).GetString(), RedirectURL: extEndpoint + common.OIDCCallbackPath, Scope: scope, + UserClaim: cfgMgr.Get(common.OIDCUserClaim).GetString(), }, nil } diff --git a/src/core/config/config_test.go b/src/core/config/config_test.go index 0993453e7..5a78f9baf 100644 --- a/src/core/config/config_test.go +++ b/src/core/config/config_test.go @@ -253,8 +253,10 @@ func TestOIDCSetting(t *testing.T) { common.OIDCName: "test", common.OIDCEndpoint: "https://oidc.test", common.OIDCVerifyCert: "true", + common.OIDCAutoOnboard: "false", common.OIDCScope: "openid, profile", common.OIDCGroupsClaim: "my_group", + common.OIDCUserClaim: "username", common.OIDCCLientID: "client", common.OIDCClientSecret: "secret", common.ExtEndpoint: "https://harbor.test", @@ -266,8 +268,10 @@ func TestOIDCSetting(t *testing.T) { assert.Equal(t, "https://oidc.test", v.Endpoint) assert.True(t, v.VerifyCert) assert.Equal(t, "my_group", v.GroupsClaim) + assert.False(t, v.AutoOnboard) assert.Equal(t, "client", v.ClientID) assert.Equal(t, "secret", v.ClientSecret) assert.Equal(t, "https://harbor.test/c/oidc/callback", v.RedirectURL) assert.ElementsMatch(t, []string{"openid", "profile"}, v.Scope) + assert.Equal(t, "username", v.UserClaim) } diff --git a/src/core/controllers/oidc.go b/src/core/controllers/oidc.go index 170c572d8..525686d1e 100644 --- a/src/core/controllers/oidc.go +++ b/src/core/controllers/oidc.go @@ -17,10 
+17,11 @@ package controllers import ( "encoding/json" "fmt" - "github.com/goharbor/harbor/src/common/dao/group" "net/http" "strings" + "github.com/goharbor/harbor/src/common/dao/group" + "github.com/goharbor/harbor/src/common" "github.com/goharbor/harbor/src/common/dao" "github.com/goharbor/harbor/src/common/models" @@ -122,30 +123,100 @@ func (oc *OIDCController) Callback() { } oc.SetSession(tokenKey, tokenBytes) - if u == nil { - oc.SetSession(userInfoKey, string(ouDataStr)) - oc.Controller.Redirect(fmt.Sprintf("/oidc-onboard?username=%s", strings.Replace(info.Username, " ", "_", -1)), - http.StatusFound) - } else { - gids, err := group.PopulateGroup(models.UserGroupsFromName(info.Groups, common.OIDCGroupType)) - if err != nil { - log.Warningf("Failed to populate groups, error: %v, user will have empty group list, username: %s", err, info.Username) - } - u.GroupIDs = gids - oidcUser, err := dao.GetOIDCUserByUserID(u.UserID) - if err != nil { - oc.SendInternalServerError(err) - return - } - _, t, err := secretAndToken(tokenBytes) - oidcUser.Token = t - if err := dao.UpdateOIDCUser(oidcUser); err != nil { - oc.SendInternalServerError(err) - return - } - oc.PopulateUserSession(*u) - oc.Controller.Redirect("/", http.StatusFound) + oidcSettings, err := config.OIDCSetting() + if err != nil { + oc.SendInternalServerError(err) + return } + + if u == nil { + // Recover the username from d.Username by default + username := info.Username + + // Fix blanks in username + username = strings.Replace(username, " ", "_", -1) + + // If automatic onboard is enabled, skip the onboard page + if oidcSettings.AutoOnboard { + log.Debug("Doing automatic onboarding\n") + user, onboarded := userOnboard(oc, info, username, tokenBytes) + if onboarded == false { + log.Error("User not onboarded\n") + return + } + log.Debug("User automatically onboarded\n") + u = user + } else { + oc.SetSession(userInfoKey, string(ouDataStr)) + oc.Controller.Redirect(fmt.Sprintf("/oidc-onboard?username=%s", username), http.StatusFound) + // Once redirected, no further actions are done + return + } + } + + gids, err := group.PopulateGroup(models.UserGroupsFromName(info.Groups, common.OIDCGroupType)) + if err != nil { + log.Warningf("Failed to populate groups, error: %v, user will have empty group list, username: %s", err, info.Username) + } + u.GroupIDs = gids + oidcUser, err := dao.GetOIDCUserByUserID(u.UserID) + if err != nil { + oc.SendInternalServerError(err) + return + } + _, t, err := secretAndToken(tokenBytes) + oidcUser.Token = t + if err := dao.UpdateOIDCUser(oidcUser); err != nil { + oc.SendInternalServerError(err) + return + } + oc.PopulateUserSession(*u) + oc.Controller.Redirect("/", http.StatusFound) + +} + +func userOnboard(oc *OIDCController, info *oidc.UserInfo, username string, tokenBytes []byte) (*models.User, bool) { + s, t, err := secretAndToken(tokenBytes) + if err != nil { + oc.SendInternalServerError(err) + return nil, false + } + + gids, err := group.PopulateGroup(models.UserGroupsFromName(info.Groups, common.OIDCGroupType)) + if err != nil { + log.Warningf("Failed to populate group user will have empty group list. 
username: %s", username) + } + + oidcUser := models.OIDCUser{ + SubIss: info.Subject + info.Issuer, + Secret: s, + Token: t, + } + + user := models.User{ + Username: username, + Realname: username, + Email: info.Email, + GroupIDs: gids, + OIDCUserMeta: &oidcUser, + Comment: oidcUserComment, + } + + log.Debugf("User created: %+v\n", user) + + err = dao.OnBoardOIDCUser(&user) + if err != nil { + if strings.Contains(err.Error(), dao.ErrDupUser.Error()) { + oc.RenderError(http.StatusConflict, "Conflict, the user with same username or email has been onboarded.") + return nil, false + } + + oc.SendInternalServerError(err) + oc.DelSession(userInfoKey) + return nil, false + } + + return &user, true } // Onboard handles the request to onboard a user authenticated via OIDC provider @@ -176,51 +247,19 @@ func (oc *OIDCController) Onboard() { oc.SendBadRequestError(errors.New("Failed to get OIDC token from session")) return } - s, t, err := secretAndToken(tb) - if err != nil { - oc.SendInternalServerError(err) - return - } + d := &oidc.UserInfo{} - err = json.Unmarshal([]byte(userInfoStr), &d) + err := json.Unmarshal([]byte(userInfoStr), &d) if err != nil { oc.SendInternalServerError(err) return } - gids, err := group.PopulateGroup(models.UserGroupsFromName(d.Groups, common.OIDCGroupType)) - if err != nil { - log.Warningf("Failed to populate group user will have empty group list. username: %s", username) - } - oidcUser := models.OIDCUser{ - SubIss: d.Subject + d.Issuer, - Secret: s, - Token: t, - } - email := d.Email - user := models.User{ - Username: username, - Realname: d.Username, - Email: email, - GroupIDs: gids, - OIDCUserMeta: &oidcUser, - Comment: oidcUserComment, - } - - err = dao.OnBoardOIDCUser(&user) - if err != nil { - if strings.Contains(err.Error(), dao.ErrDupUser.Error()) { - oc.RenderError(http.StatusConflict, "Conflict, the user with same username or email has been onboarded.") - return - } - oc.SendInternalServerError(err) + if user, onboarded := userOnboard(oc, d, username, tb); onboarded { + user.OIDCUserMeta = nil oc.DelSession(userInfoKey) - return + oc.PopulateUserSession(*user) } - - user.OIDCUserMeta = nil - oc.DelSession(userInfoKey) - oc.PopulateUserSession(user) } func secretAndToken(tokenBytes []byte) (string, string, error) { diff --git a/src/portal/src/app/config/auth/config-auth.component.html b/src/portal/src/app/config/auth/config-auth.component.html index 794249a8c..a5d39bcee 100644 --- a/src/portal/src/app/config/auth/config-auth.component.html +++ b/src/portal/src/app/config/auth/config-auth.component.html @@ -374,24 +374,53 @@ [(ngModel)]="currentConfig.oidc_scope.value" id="oidcScope" size="40" required [disabled]="disabled(currentConfig.oidc_scope)" pattern="^(\w+,){0,}openid(,\w+){0,}$" /> {{'TOOLTIP.SCOPE_REQUIRED' | translate}} - - - - - - - + + + + + + + + + + + + + + + + +
-{{ 'CONFIG.OIDC.OIDC_REDIREC_URL' | translate}}
-{{redirectUrl}}/c/oidc/callback
+{{redirectUrl}}/c/oidc/callback
+
diff --git a/src/portal/src/i18n/lang/en-us-lang.json b/src/portal/src/i18n/lang/en-us-lang.json index 00284b26c..41ca7e579 100644 --- a/src/portal/src/i18n/lang/en-us-lang.json +++ b/src/portal/src/i18n/lang/en-us-lang.json @@ -102,6 +102,8 @@ "OIDC_VERIFYCERT": "Uncheck this box if your OIDC server is hosted via self-signed certificate.", "OIDC_GROUP_CLAIM": "The name of Claim in the ID token whose value is the list of group names.", "OIDC_GROUP_CLAIM_WARNING": "It can only contain letters, numbers, underscores, and the input length is no more than 256 characters.", + "OIDC_AUTOONBOARD": "Skip the onboarding screen, so user cannot change its username. Username is provided from ID Token", + "OIDC_USER_CLAIM": "The name of the claim in the ID Token where the username is retrieved from. If not specified, it will default to 'name'", "NEW_SECRET": "The secret must longer than 8 chars with at least 1 uppercase letter, 1 lowercase letter and 1 number" }, "PLACEHOLDER": { @@ -911,6 +913,8 @@ "CLIENTSECRET": "OIDC Client Secret", "SCOPE": "OIDC Scope", "OIDC_VERIFYCERT": "Verify Certificate", + "OIDC_AUTOONBOARD": "Automatic onboarding", + "USER_CLAIM": "OIDC Username Claim", "OIDC_SETNAME": "Set OIDC Username", "OIDC_SETNAMECONTENT": "You must create a Harbor username the first time when authenticating via a third party(OIDC).This will be used within Harbor to be associated with projects, roles, etc.", "OIDC_USERNAME": "Username", diff --git a/src/portal/src/lib/components/config/config.ts b/src/portal/src/lib/components/config/config.ts index 6149104d0..b32047b5b 100644 --- a/src/portal/src/lib/components/config/config.ts +++ b/src/portal/src/lib/components/config/config.ts @@ -98,7 +98,9 @@ export class Configuration { oidc_client_id?: StringValueItem; oidc_client_secret?: StringValueItem; oidc_verify_cert?: BoolValueItem; + oidc_auto_onboard?: BoolValueItem; oidc_scope?: StringValueItem; + oidc_user_claim?: StringValueItem; count_per_project: NumberValueItem; storage_per_project: NumberValueItem; cfg_expiration: NumberValueItem; @@ -155,8 +157,10 @@ export class Configuration { this.oidc_client_id = new StringValueItem('', true); this.oidc_client_secret = new StringValueItem('', true); this.oidc_verify_cert = new BoolValueItem(false, true); + this.oidc_auto_onboard = new BoolValueItem(false, true); this.oidc_scope = new StringValueItem('', true); this.oidc_groups_claim = new StringValueItem('', true); + this.oidc_user_claim = new StringValueItem('', true); this.count_per_project = new NumberValueItem(-1, true); this.storage_per_project = new NumberValueItem(-1, true); } From 6f88ff7429479d5c36c43f92269b11b6dcb644af Mon Sep 17 00:00:00 2001 From: Alvaro Iradier Date: Wed, 6 May 2020 02:02:06 +0200 Subject: [PATCH 02/11] Fix test suite and add test for userClaim Signed-off-by: Alvaro Iradier --- src/common/utils/oidc/helper_test.go | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/src/common/utils/oidc/helper_test.go b/src/common/utils/oidc/helper_test.go index 06dbac0de..96b2970ff 100644 --- a/src/common/utils/oidc/helper_test.go +++ b/src/common/utils/oidc/helper_test.go @@ -175,6 +175,7 @@ func TestUserInfoFromClaims(t *testing.T) { s := []struct { input map[string]interface{} groupClaim string + userClaim string expect *UserInfo }{ { @@ -184,6 +185,7 @@ func TestUserInfoFromClaims(t *testing.T) { "groups": []interface{}{"g1", "g2"}, }, groupClaim: "grouplist", + userClaim: "", expect: &UserInfo{ Issuer: "", Subject: "", @@ -200,6 +202,7 @@ func 
TestUserInfoFromClaims(t *testing.T) { "groups": []interface{}{"g1", "g2"}, }, groupClaim: "groups", + userClaim: "", expect: &UserInfo{ Issuer: "", Subject: "", @@ -218,6 +221,7 @@ func TestUserInfoFromClaims(t *testing.T) { "groupclaim": []interface{}{}, }, groupClaim: "groupclaim", + userClaim: "", expect: &UserInfo{ Issuer: "issuer", Subject: "subject000", @@ -227,9 +231,26 @@ func TestUserInfoFromClaims(t *testing.T) { hasGroupClaim: true, }, }, + { + input: map[string]interface{}{ + "name": "Alvaro", + "email": "airadier@gmail.com", + "groups": []interface{}{"g1", "g2"}, + }, + groupClaim: "grouplist", + userClaim: "email", + expect: &UserInfo{ + Issuer: "", + Subject: "", + Username: "airadier@gmail.com", + Email: "airadier@gmail.com", + Groups: []string{}, + hasGroupClaim: false, + }, + }, } for _, tc := range s { - out, err := userInfoFromClaims(&fakeClaims{tc.input}, tc.groupClaim) + out, err := userInfoFromClaims(&fakeClaims{tc.input}, tc.groupClaim, tc.userClaim) assert.Nil(t, err) assert.Equal(t, *tc.expect, *out) } From 81a7239c66a63aca2ec8e9746ede9957fd759754 Mon Sep 17 00:00:00 2001 From: Alvaro Iradier Date: Sun, 24 May 2020 23:43:29 +0200 Subject: [PATCH 03/11] Better error handling * Raise an internal error if username claim is not found, instead of just logging a warning * Don't remove userInfoKey for session on error when it is not required * Rename "OIDC Username Claim" to just "Username claim" Signed-off-by: Alvaro Iradier --- src/common/utils/oidc/helper.go | 8 ++++---- src/core/controllers/oidc.go | 2 +- src/portal/src/i18n/lang/en-us-lang.json | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/common/utils/oidc/helper.go b/src/common/utils/oidc/helper.go index 0ba24b8f1..1b4981156 100644 --- a/src/common/utils/oidc/helper.go +++ b/src/common/utils/oidc/helper.go @@ -321,11 +321,11 @@ func userInfoFromClaims(c claimsProvider, g, u string) (*UserInfo, error) { return nil, err } - if username, ok := allClaims[u].(string); !ok { - log.Warningf("OIDC. Failed to recover Username from claim. Claim '%s' is empty", u) - } else { - res.Username = username + username, ok := allClaims[u].(string) + if !ok { + return nil, fmt.Errorf("OIDC. Failed to recover Username from claim. 
Claim '%s' is invalid or not a string", u) } + res.Username = username } res.Groups, res.hasGroupClaim = GroupsFromClaims(c, g) diff --git a/src/core/controllers/oidc.go b/src/core/controllers/oidc.go index 525686d1e..20f3a971e 100644 --- a/src/core/controllers/oidc.go +++ b/src/core/controllers/oidc.go @@ -212,7 +212,6 @@ func userOnboard(oc *OIDCController, info *oidc.UserInfo, username string, token } oc.SendInternalServerError(err) - oc.DelSession(userInfoKey) return nil, false } @@ -260,6 +259,7 @@ func (oc *OIDCController) Onboard() { oc.DelSession(userInfoKey) oc.PopulateUserSession(*user) } + } func secretAndToken(tokenBytes []byte) (string, string, error) { diff --git a/src/portal/src/i18n/lang/en-us-lang.json b/src/portal/src/i18n/lang/en-us-lang.json index 41ca7e579..47c538879 100644 --- a/src/portal/src/i18n/lang/en-us-lang.json +++ b/src/portal/src/i18n/lang/en-us-lang.json @@ -914,7 +914,7 @@ "SCOPE": "OIDC Scope", "OIDC_VERIFYCERT": "Verify Certificate", "OIDC_AUTOONBOARD": "Automatic onboarding", - "USER_CLAIM": "OIDC Username Claim", + "USER_CLAIM": "Username Claim", "OIDC_SETNAME": "Set OIDC Username", "OIDC_SETNAMECONTENT": "You must create a Harbor username the first time when authenticating via a third party(OIDC).This will be used within Harbor to be associated with projects, roles, etc.", "OIDC_USERNAME": "Username", From e7c89ce1d995768804ecc989b8178abffb220ace Mon Sep 17 00:00:00 2001 From: Ziming Zhang Date: Thu, 16 Jul 2020 19:19:18 +0800 Subject: [PATCH 04/11] fix SWR replication adapter pull-based Signed-off-by: Ziming Zhang --- .../adapter/huawei/huawei_adapter.go | 22 ++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/src/replication/adapter/huawei/huawei_adapter.go b/src/replication/adapter/huawei/huawei_adapter.go index 4bd18c062..d1c2f9ce1 100644 --- a/src/replication/adapter/huawei/huawei_adapter.go +++ b/src/replication/adapter/huawei/huawei_adapter.go @@ -59,11 +59,23 @@ type adapter struct { // Info gets info about Huawei SWR func (a *adapter) Info() (*model.RegistryInfo, error) { registryInfo := model.RegistryInfo{ - Type: model.RegistryTypeHuawei, - Description: "Adapter for SWR -- The image registry of Huawei Cloud", - SupportedResourceTypes: []model.ResourceType{model.ResourceTypeImage}, - SupportedResourceFilters: []*model.FilterStyle{}, - SupportedTriggers: []model.TriggerType{}, + Type: model.RegistryTypeHuawei, + Description: "Adapter for SWR -- The image registry of Huawei Cloud", + SupportedResourceTypes: []model.ResourceType{model.ResourceTypeImage}, + SupportedResourceFilters: []*model.FilterStyle{ + { + Type: model.FilterTypeName, + Style: model.FilterStyleTypeText, + }, + { + Type: model.FilterTypeTag, + Style: model.FilterStyleTypeText, + }, + }, + SupportedTriggers: []model.TriggerType{ + model.TriggerTypeManual, + model.TriggerTypeScheduled, + }, } return ®istryInfo, nil } From 9493611666c6672876107aa9de0857df7a8c5896 Mon Sep 17 00:00:00 2001 From: Wenkai Yin Date: Fri, 17 Jul 2020 11:58:08 +0800 Subject: [PATCH 05/11] Don't return the error detail back to the client when adding registry Don't return the error detail back to the client when adding registry to avoid security issue Signed-off-by: Wenkai Yin --- src/core/api/registry.go | 41 ++++++++++++++++------------------------ 1 file changed, 16 insertions(+), 25 deletions(-) diff --git a/src/core/api/registry.go b/src/core/api/registry.go index 81e254546..19fb0943b 100644 --- a/src/core/api/registry.go +++ b/src/core/api/registry.go @@ -6,7 +6,6 @@ 
import ( "net/http" "strconv" - common_http "github.com/goharbor/harbor/src/common/http" common_models "github.com/goharbor/harbor/src/common/models" "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/core/api/models" @@ -111,21 +110,12 @@ func (t *RegistryAPI) Ping() { return } - status, err := registry.CheckHealthStatus(reg) - if err != nil { - e, ok := err.(*common_http.Error) - if ok && e.Code == http.StatusUnauthorized { - t.SendBadRequestError(errors.New("invalid credential")) - return - } - t.SendInternalServerError(fmt.Errorf("failed to check health of registry %s: %v", reg.URL, err)) + status := t.getHealthStatus(reg) + if status != model.Healthy { + t.SendBadRequestError(errors.New("the registry is unhealthy")) return } - if status != model.Healthy { - t.SendBadRequestError(errors.New("")) - return - } return } @@ -226,13 +216,9 @@ func (t *RegistryAPI) Post() { // Prevent SSRF security issue #3755 r.URL = url.Scheme + "://" + url.Host + url.Path - status, err := registry.CheckHealthStatus(r) - if err != nil { - t.SendBadRequestError(fmt.Errorf("health check to registry %s failed: %v", r.URL, err)) - return - } + status := t.getHealthStatus(r) if status != model.Healthy { - t.SendBadRequestError(fmt.Errorf("registry %s is unhealthy: %s", r.URL, status)) + t.SendBadRequestError(errors.New("the registry is unhealthy")) return } @@ -247,6 +233,15 @@ func (t *RegistryAPI) Post() { t.Redirect(http.StatusCreated, strconv.FormatInt(id, 10)) } +func (t *RegistryAPI) getHealthStatus(r *model.Registry) string { + status, err := registry.CheckHealthStatus(r) + if err != nil { + log.Errorf("failed to check the health status of registry %s: %v", r.URL, err) + return model.Unhealthy + } + return string(status) +} + // Put updates a registry func (t *RegistryAPI) Put() { id, err := t.GetIDFromURL() @@ -313,13 +308,9 @@ func (t *RegistryAPI) Put() { } } - status, err := registry.CheckHealthStatus(r) - if err != nil { - t.SendBadRequestError(fmt.Errorf("health check to registry %s failed: %v", r.URL, err)) - return - } + status := t.getHealthStatus(r) if status != model.Healthy { - t.SendBadRequestError(fmt.Errorf("registry %s is unhealthy: %s", r.URL, status)) + t.SendBadRequestError(errors.New("the registry is unhealthy")) return } From 54a1155140d95301f436618c10d62e7316e0509a Mon Sep 17 00:00:00 2001 From: Wenkai Yin Date: Fri, 17 Jul 2020 15:24:18 +0800 Subject: [PATCH 06/11] Prevent copying artifact to a proxy cache project Prevent copying artifact to a proxy cache project Signed-off-by: Wenkai Yin --- api/v2.0/swagger.yaml | 10 ++++++++++ src/server/v2.0/handler/artifact.go | 12 ++++++++++++ 2 files changed, 22 insertions(+) diff --git a/api/v2.0/swagger.yaml b/api/v2.0/swagger.yaml index bc41ce028..b4774d8ed 100644 --- a/api/v2.0/swagger.yaml +++ b/api/v2.0/swagger.yaml @@ -228,6 +228,8 @@ paths: $ref: '#/responses/403' '404': $ref: '#/responses/404' + '405': + $ref: '#/responses/405' '500': $ref: '#/responses/500' /projects/{project_name}/repositories/{repository_name}/artifacts/{reference}: @@ -1323,6 +1325,14 @@ responses: type: string schema: $ref: '#/definitions/Errors' + '405': + description: Method not allowed + headers: + X-Request-Id: + description: The ID of the corresponding request for the response + type: string + schema: + $ref: '#/definitions/Errors' '409': description: Conflict headers: diff --git a/src/server/v2.0/handler/artifact.go b/src/server/v2.0/handler/artifact.go index 8609a04c7..7818b6fa3 100644 --- a/src/server/v2.0/handler/artifact.go 
+++ b/src/server/v2.0/handler/artifact.go @@ -18,6 +18,7 @@ import ( "context" "fmt" "github.com/goharbor/harbor/src/controller/event/metadata" + "github.com/goharbor/harbor/src/controller/project" "github.com/goharbor/harbor/src/pkg/notification" "net/http" "strings" @@ -48,6 +49,7 @@ const ( func newArtifactAPI() *artifactAPI { return &artifactAPI{ artCtl: artifact.Ctl, + proCtl: project.Ctl, repoCtl: repository.Ctl, scanCtl: scan.DefaultController, tagCtl: tag.Ctl, @@ -57,6 +59,7 @@ func newArtifactAPI() *artifactAPI { type artifactAPI struct { BaseAPI artCtl artifact.Controller + proCtl project.Controller repoCtl repository.Controller scanCtl scan.Controller tagCtl tag.Controller @@ -152,6 +155,15 @@ func (a *artifactAPI) CopyArtifact(ctx context.Context, params operation.CopyArt return a.SendError(ctx, err) } + pro, err := a.proCtl.GetByName(ctx, params.ProjectName) + if err != nil { + return a.SendError(ctx, err) + } + if pro.RegistryID > 0 { + return a.SendError(ctx, errors.New(nil).WithCode(errors.MethodNotAllowedCode). + WithMessage("cannot copy the artifact to a proxy cache project")) + } + srcRepo, ref, err := parse(params.From) if err != nil { return a.SendError(ctx, err) From 3345b8aae261993234ecf5e3f1857588bef2e073 Mon Sep 17 00:00:00 2001 From: wang yan Date: Thu, 16 Jul 2020 15:02:26 +0800 Subject: [PATCH 07/11] fix get manifest return code When to call, ~~~ REQUEST ~~~ GET /v2/conformance/testrepo/manifests/.INVALID_MANIFEST_NAME Per OCI distribution spec, it has to return 404, instead of 400 (project name required) Signed-off-by: wang yan --- src/server/middleware/repoproxy/proxy.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/server/middleware/repoproxy/proxy.go b/src/server/middleware/repoproxy/proxy.go index abf7cc073..565195b5d 100644 --- a/src/server/middleware/repoproxy/proxy.go +++ b/src/server/middleware/repoproxy/proxy.go @@ -73,7 +73,11 @@ func handleBlob(w http.ResponseWriter, r *http.Request, next http.Handler) error } func preCheck(ctx context.Context) (art lib.ArtifactInfo, p *models.Project, ctl proxy.Controller, err error) { + none := lib.ArtifactInfo{} art = lib.GetArtifactInfo(ctx) + if art == none { + return none, nil, nil, errors.New("artifactinfo is not found").WithCode(errors.NotFoundCode) + } ctl = proxy.ControllerInstance() p, err = project.Ctl.GetByName(ctx, art.ProjectName, project.Metadata(false)) return From d9ca9bbc69cdd565673a8dbd6d4af85a1332a9d0 Mon Sep 17 00:00:00 2001 From: Steven Zou Date: Fri, 17 Jul 2020 22:33:41 +0800 Subject: [PATCH 08/11] fix(jobservice):add job id in log getting error (#12513) Signed-off-by: Steven Zou --- src/jobservice/core/controller.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/jobservice/core/controller.go b/src/jobservice/core/controller.go index d3cbfc153..69aede3a9 100644 --- a/src/jobservice/core/controller.go +++ b/src/jobservice/core/controller.go @@ -135,7 +135,7 @@ func (bc *basicController) GetJobLogData(jobID string) ([]byte, error) { logData, err := logger.Retrieve(jobID) if err != nil { - return nil, err + return nil, errors.Wrapf(err, "error for getting log of job %s", jobID) } return logData, nil From 8857e89e401fed965cc04e38546bcede223ceebb Mon Sep 17 00:00:00 2001 From: Ziming Zhang Date: Tue, 16 Jun 2020 00:20:18 +0800 Subject: [PATCH 09/11] feature(redis) support redis sentinel Signed-off-by: Ziming Zhang --- .github/workflows/CI.yml | 10 +- Makefile | 15 +- make/harbor.yml.tmpl | 9 +- make/photon/Makefile | 2 +- make/photon/chartserver/builder | 1 + 
make/photon/chartserver/compile.sh | 5 + make/photon/chartserver/redis.patch | 79 + .../migrations/version_2_0_0/harbor.yml.jinja | 18 +- .../prepare/templates/chartserver/env.jinja | 7 + make/photon/prepare/templates/core/env.jinja | 2 +- .../templates/registry/config.yml.jinja | 8 +- .../prepare/templates/trivy-adapter/env.jinja | 1 + make/photon/prepare/utils/chart.py | 63 +- make/photon/prepare/utils/configs.py | 70 +- make/photon/prepare/utils/core.py | 8 +- make/photon/prepare/utils/misc.py | 22 +- make/photon/prepare/utils/registry.py | 36 +- make/photon/registry/builder | 1 + make/photon/registry/redis.patch | 901 ++++++ src/chartserver/cache.go | 36 +- src/chartserver/redis_sentinel.go | 250 ++ src/chartserver/utils.go | 111 +- src/chartserver/utils_test.go | 54 +- src/common/utils/redis/helper.go | 232 -- src/common/utils/redis/helper_test.go | 102 - src/controller/blob/controller.go | 9 +- src/controller/quota/controller.go | 8 +- src/core/api/health.go | 16 +- src/core/main.go | 61 +- src/go.mod | 2 +- src/go.sum | 10 +- src/jobservice/common/utils/utils.go | 35 - src/jobservice/config/config.go | 14 +- src/jobservice/config/config_test.go | 4 +- src/jobservice/config_test.yml | 2 +- .../job/impl/gc/garbage_collection.go | 32 +- src/jobservice/job/impl/gc/util.go | 2 +- src/jobservice/runtime/bootstrap.go | 37 +- src/jobservice/tests/utils.go | 25 +- src/lib/redis/helper.go | 72 + src/lib/redis/helper_test.go | 40 + src/lib/redis/redisclient.go | 172 ++ .../github.com/FZambia/sentinel/.gitignore | 24 + .../redigo => FZambia/sentinel}/LICENSE | 28 +- .../github.com/FZambia/sentinel/README.md | 39 + .../github.com/FZambia/sentinel/sentinel.go | 421 +++ .../redis_sentinel/sess_redis_sentinel.go | 234 ++ .../garyburd/redigo/internal/commandinfo.go | 54 - .../github.com/garyburd/redigo/redis/conn.go | 673 ----- .../github.com/garyburd/redigo/redis/doc.go | 177 -- .../github.com/garyburd/redigo/redis/go16.go | 27 - .../github.com/garyburd/redigo/redis/go17.go | 29 - .../github.com/garyburd/redigo/redis/go18.go | 9 - .../github.com/garyburd/redigo/redis/log.go | 134 - .../github.com/garyburd/redigo/redis/pool.go | 527 ---- .../garyburd/redigo/redis/pool17.go | 35 - .../garyburd/redigo/redis/pubsub.go | 157 -- .../github.com/garyburd/redigo/redis/redis.go | 117 - .../github.com/garyburd/redigo/redis/reply.go | 479 ---- .../github.com/garyburd/redigo/redis/scan.go | 585 ---- .../garyburd/redigo/redis/script.go | 91 - .../github.com/go-redis/redis/.gitignore | 2 + .../github.com/go-redis/redis/.travis.yml | 21 + .../github.com/go-redis/redis/CHANGELOG.md | 25 + src/vendor/github.com/go-redis/redis/LICENSE | 25 + src/vendor/github.com/go-redis/redis/Makefile | 20 + .../github.com/go-redis/redis/README.md | 146 + .../github.com/go-redis/redis/cluster.go | 1649 +++++++++++ .../go-redis/redis/cluster_commands.go | 22 + .../github.com/go-redis/redis/command.go | 1874 +++++++++++++ .../github.com/go-redis/redis/commands.go | 2498 +++++++++++++++++ src/vendor/github.com/go-redis/redis/doc.go | 4 + .../internal/consistenthash/consistenthash.go | 81 + .../go-redis/redis/internal/error.go | 84 + .../redis/internal/hashtag/hashtag.go | 77 + .../go-redis/redis/internal/internal.go | 24 + .../github.com/go-redis/redis/internal/log.go | 15 + .../go-redis/redis/internal/once.go | 60 + .../go-redis/redis/internal/pool/conn.go | 93 + .../go-redis/redis/internal/pool/pool.go | 476 ++++ .../redis/internal/pool/pool_single.go | 53 + .../redis/internal/pool/pool_sticky.go | 109 + 
.../go-redis/redis/internal/proto/reader.go | 290 ++ .../go-redis/redis/internal/proto/scan.go | 166 ++ .../go-redis/redis/internal/proto/writer.go | 159 ++ .../internal/singleflight/singleflight.go | 64 + .../go-redis/redis/internal/util.go | 29 + .../go-redis/redis/internal/util/safe.go | 11 + .../go-redis/redis/internal/util/strconv.go | 19 + .../go-redis/redis/internal/util/unsafe.go | 22 + .../github.com/go-redis/redis/iterator.go | 73 + .../github.com/go-redis/redis/options.go | 212 ++ .../github.com/go-redis/redis/pipeline.go | 113 + .../github.com/go-redis/redis/pubsub.go | 473 ++++ src/vendor/github.com/go-redis/redis/redis.go | 524 ++++ .../github.com/go-redis/redis/result.go | 140 + src/vendor/github.com/go-redis/redis/ring.go | 658 +++++ .../github.com/go-redis/redis/script.go | 62 + .../github.com/go-redis/redis/sentinel.go | 369 +++ src/vendor/github.com/go-redis/redis/tx.go | 110 + .../github.com/go-redis/redis/universal.go | 179 ++ src/vendor/modules.txt | 17 +- tests/ci/api_common_install.sh | 1 + tests/resources/Util.robot | 1 + 104 files changed, 13729 insertions(+), 3745 deletions(-) create mode 100644 make/photon/chartserver/redis.patch create mode 100644 make/photon/registry/redis.patch create mode 100644 src/chartserver/redis_sentinel.go delete mode 100644 src/common/utils/redis/helper.go delete mode 100644 src/common/utils/redis/helper_test.go create mode 100644 src/lib/redis/helper.go create mode 100644 src/lib/redis/helper_test.go create mode 100644 src/lib/redis/redisclient.go create mode 100644 src/vendor/github.com/FZambia/sentinel/.gitignore rename src/vendor/github.com/{garyburd/redigo => FZambia/sentinel}/LICENSE (89%) create mode 100644 src/vendor/github.com/FZambia/sentinel/README.md create mode 100644 src/vendor/github.com/FZambia/sentinel/sentinel.go create mode 100644 src/vendor/github.com/astaxie/beego/session/redis_sentinel/sess_redis_sentinel.go delete mode 100644 src/vendor/github.com/garyburd/redigo/internal/commandinfo.go delete mode 100644 src/vendor/github.com/garyburd/redigo/redis/conn.go delete mode 100644 src/vendor/github.com/garyburd/redigo/redis/doc.go delete mode 100644 src/vendor/github.com/garyburd/redigo/redis/go16.go delete mode 100644 src/vendor/github.com/garyburd/redigo/redis/go17.go delete mode 100644 src/vendor/github.com/garyburd/redigo/redis/go18.go delete mode 100644 src/vendor/github.com/garyburd/redigo/redis/log.go delete mode 100644 src/vendor/github.com/garyburd/redigo/redis/pool.go delete mode 100644 src/vendor/github.com/garyburd/redigo/redis/pool17.go delete mode 100644 src/vendor/github.com/garyburd/redigo/redis/pubsub.go delete mode 100644 src/vendor/github.com/garyburd/redigo/redis/redis.go delete mode 100644 src/vendor/github.com/garyburd/redigo/redis/reply.go delete mode 100644 src/vendor/github.com/garyburd/redigo/redis/scan.go delete mode 100644 src/vendor/github.com/garyburd/redigo/redis/script.go create mode 100644 src/vendor/github.com/go-redis/redis/.gitignore create mode 100644 src/vendor/github.com/go-redis/redis/.travis.yml create mode 100644 src/vendor/github.com/go-redis/redis/CHANGELOG.md create mode 100644 src/vendor/github.com/go-redis/redis/LICENSE create mode 100644 src/vendor/github.com/go-redis/redis/Makefile create mode 100644 src/vendor/github.com/go-redis/redis/README.md create mode 100644 src/vendor/github.com/go-redis/redis/cluster.go create mode 100644 src/vendor/github.com/go-redis/redis/cluster_commands.go create mode 100644 src/vendor/github.com/go-redis/redis/command.go create mode 
100644 src/vendor/github.com/go-redis/redis/commands.go create mode 100644 src/vendor/github.com/go-redis/redis/doc.go create mode 100644 src/vendor/github.com/go-redis/redis/internal/consistenthash/consistenthash.go create mode 100644 src/vendor/github.com/go-redis/redis/internal/error.go create mode 100644 src/vendor/github.com/go-redis/redis/internal/hashtag/hashtag.go create mode 100644 src/vendor/github.com/go-redis/redis/internal/internal.go create mode 100644 src/vendor/github.com/go-redis/redis/internal/log.go create mode 100644 src/vendor/github.com/go-redis/redis/internal/once.go create mode 100644 src/vendor/github.com/go-redis/redis/internal/pool/conn.go create mode 100644 src/vendor/github.com/go-redis/redis/internal/pool/pool.go create mode 100644 src/vendor/github.com/go-redis/redis/internal/pool/pool_single.go create mode 100644 src/vendor/github.com/go-redis/redis/internal/pool/pool_sticky.go create mode 100644 src/vendor/github.com/go-redis/redis/internal/proto/reader.go create mode 100644 src/vendor/github.com/go-redis/redis/internal/proto/scan.go create mode 100644 src/vendor/github.com/go-redis/redis/internal/proto/writer.go create mode 100644 src/vendor/github.com/go-redis/redis/internal/singleflight/singleflight.go create mode 100644 src/vendor/github.com/go-redis/redis/internal/util.go create mode 100644 src/vendor/github.com/go-redis/redis/internal/util/safe.go create mode 100644 src/vendor/github.com/go-redis/redis/internal/util/strconv.go create mode 100644 src/vendor/github.com/go-redis/redis/internal/util/unsafe.go create mode 100644 src/vendor/github.com/go-redis/redis/iterator.go create mode 100644 src/vendor/github.com/go-redis/redis/options.go create mode 100644 src/vendor/github.com/go-redis/redis/pipeline.go create mode 100644 src/vendor/github.com/go-redis/redis/pubsub.go create mode 100644 src/vendor/github.com/go-redis/redis/redis.go create mode 100644 src/vendor/github.com/go-redis/redis/result.go create mode 100644 src/vendor/github.com/go-redis/redis/ring.go create mode 100644 src/vendor/github.com/go-redis/redis/script.go create mode 100644 src/vendor/github.com/go-redis/redis/sentinel.go create mode 100644 src/vendor/github.com/go-redis/redis/tx.go create mode 100644 src/vendor/github.com/go-redis/redis/universal.go diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 17a7f08eb..df88660c9 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -116,6 +116,8 @@ jobs: echo "::set-env name=GOPATH::$(go env GOPATH):$GITHUB_WORKSPACE" echo "::add-path::$(go env GOPATH)/bin" echo "::set-env name=TOKEN_PRIVATE_KEY_PATH::${GITHUB_WORKSPACE}/src/github.com/goharbor/harbor/tests/private_key.pem" + IP=`hostname -I | awk '{print $1}'` + echo "::set-env name=IP::$IP" shell: bash - name: before_install run: | @@ -129,20 +131,18 @@ jobs: curl -L https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > docker-compose chmod +x docker-compose sudo mv docker-compose /usr/local/bin - IP=`hostname -I | awk '{print $1}'` echo '{"insecure-registries" : ["'$IP':5000"]}' | sudo tee /etc/docker/daemon.json - echo "::set-env name=IP::$IP" sudo cp ./tests/harbor_ca.crt /usr/local/share/ca-certificates/ sudo update-ca-certificates sudo service docker restart wget https://get.helm.sh/helm-v2.14.1-linux-386.tar.gz && tar zxvf helm-v2.14.1-linux-386.tar.gz sudo mv linux-386/helm /usr/local/bin/helm2 helm2 init --client-only - helm2 plugin install https://github.com/chartmuseum/helm-push 
+ helm2 plugin list | grep push || helm2 plugin install https://github.com/chartmuseum/helm-push wget https://get.helm.sh/helm-v3.1.1-linux-386.tar.gz && tar zxvf helm-v3.1.1-linux-386.tar.gz sudo mv linux-386/helm /usr/local/bin/helm3 - helm3 plugin install https://github.com/chartmuseum/helm-push - mkdir -p $CNAB_PATH && cd $CNAB_PATH && git clone https://github.com/cnabio/cnab-to-oci.git + helm3 plugin list | grep push || helm3 plugin install https://github.com/chartmuseum/helm-push + rm -rf $CNAB_PATH;mkdir -p $CNAB_PATH && cd $CNAB_PATH && git clone https://github.com/cnabio/cnab-to-oci.git cd cnab-to-oci && git checkout v0.3.0-beta4 go list make build diff --git a/Makefile b/Makefile index 3a994d5d2..3f4d7be43 100644 --- a/Makefile +++ b/Makefile @@ -98,16 +98,19 @@ PKGVERSIONTAG=dev PREPARE_VERSION_NAME=versions #versions -REGISTRYVERSION=v2.7.1-patch-2819-2553 +REGISTRYVERSION=v2.7.1-patch-2819-2553-redis NOTARYVERSION=v0.6.1 CLAIRVERSION=v2.1.4 NOTARYMIGRATEVERSION=v3.5.4 -CLAIRADAPTERVERSION=v1.0.2 +CLAIRADAPTERVERSION=v1.1.0-rc1 TRIVYVERSION=v0.9.1 -TRIVYADAPTERVERSION=v0.12.0 +TRIVYADAPTERVERSION=v0.13.0 # version of chartmuseum -CHARTMUSEUMVERSION=v0.12.0 +CHARTMUSEUMVERSION=v0.12.0-redis + +# version of chartmuseum for pulling the source code +CHARTMUSEUM_SRC_TAG=v0.12.0 # version of registry for pulling the source code REGISTRY_SRC_TAG=v2.7.1 @@ -375,7 +378,7 @@ build: -e TRIVYVERSION=$(TRIVYVERSION) -e TRIVYADAPTERVERSION=$(TRIVYADAPTERVERSION) \ -e CLAIRVERSION=$(CLAIRVERSION) -e CLAIRADAPTERVERSION=$(CLAIRADAPTERVERSION) -e VERSIONTAG=$(VERSIONTAG) \ -e BUILDBIN=$(BUILDBIN) \ - -e CHARTMUSEUMVERSION=$(CHARTMUSEUMVERSION) -e DOCKERIMAGENAME_CHART_SERVER=$(DOCKERIMAGENAME_CHART_SERVER) \ + -e CHARTMUSEUMVERSION=$(CHARTMUSEUMVERSION) -e CHARTMUSEUM_SRC_TAG=$(CHARTMUSEUM_SRC_TAG) -e DOCKERIMAGENAME_CHART_SERVER=$(DOCKERIMAGENAME_CHART_SERVER) \ -e NPM_REGISTRY=$(NPM_REGISTRY) -e BASEIMAGETAG=$(BASEIMAGETAG) -e BASEIMAGENAMESPACE=$(BASEIMAGENAMESPACE) \ -e CLAIRURL=$(CLAIRURL) -e CHARTURL=$(CHARTURL) -e NORARYURL=$(NORARYURL) -e REGISTRYURL=$(REGISTRYURL) -e CLAIR_ADAPTER_DOWNLOAD_URL=$(CLAIR_ADAPTER_DOWNLOAD_URL) \ -e TRIVY_DOWNLOAD_URL=$(TRIVY_DOWNLOAD_URL) -e TRIVY_ADAPTER_DOWNLOAD_URL=$(TRIVY_ADAPTER_DOWNLOAD_URL) @@ -383,7 +386,7 @@ build: build_base_docker: @for name in chartserver clair clair-adapter trivy-adapter core db jobservice log nginx notary-server notary-signer portal prepare redis registry registryctl; do \ echo $$name ; \ - $(DOCKERBUILD) --pull -f $(MAKEFILEPATH_PHOTON)/$$name/Dockerfile.base -t $(BASEIMAGENAMESPACE)/harbor-$$name-base:$(BASEIMAGETAG) --label base-build-date=$(date +"%Y%m%d") . && \ + $(DOCKERBUILD) --pull --no-cache -f $(MAKEFILEPATH_PHOTON)/$$name/Dockerfile.base -t $(BASEIMAGENAMESPACE)/harbor-$$name-base:$(BASEIMAGETAG) --label base-build-date=$(date +"%Y%m%d") . 
&& \ $(PUSHSCRIPTPATH)/$(PUSHSCRIPTNAME) $(BASEIMAGENAMESPACE)/harbor-$$name-base:$(BASEIMAGETAG) $(REGISTRYUSER) $(REGISTRYPASSWORD) || exit 1; \ done diff --git a/make/harbor.yml.tmpl b/make/harbor.yml.tmpl index 6d6228609..020b08d46 100644 --- a/make/harbor.yml.tmpl +++ b/make/harbor.yml.tmpl @@ -171,9 +171,14 @@ _version: 2.0.0 # Uncomment external_redis if using external Redis server # external_redis: -# host: redis -# port: 6379 +# # support redis, redis+sentinel +# # host for redis: : +# # host for redis+sentinel: +# # :,:,: +# host: redis:6379 # password: +# # sentinel_master_set must be set to support redis+sentinel +# #sentinel_master_set: # # db_index 0 is for core, it's unchangeable # registry_db_index: 1 # jobservice_db_index: 2 diff --git a/make/photon/Makefile b/make/photon/Makefile index a54012b20..8019e5f4d 100644 --- a/make/photon/Makefile +++ b/make/photon/Makefile @@ -192,7 +192,7 @@ _build_chart_server: rm -rf $(DOCKERFILEPATH_CHART_SERVER)/binary && mkdir -p $(DOCKERFILEPATH_CHART_SERVER)/binary && \ $(call _get_binary, $(CHARTURL), $(DOCKERFILEPATH_CHART_SERVER)/binary/chartm); \ else \ - cd $(DOCKERFILEPATH_CHART_SERVER) && $(DOCKERFILEPATH_CHART_SERVER)/builder $(GOBUILDIMAGE) $(CHART_SERVER_CODE_BASE) $(CHARTMUSEUMVERSION) $(CHART_SERVER_MAIN_PATH) $(CHART_SERVER_BIN_NAME) && cd - ; \ + cd $(DOCKERFILEPATH_CHART_SERVER) && $(DOCKERFILEPATH_CHART_SERVER)/builder $(GOBUILDIMAGE) $(CHART_SERVER_CODE_BASE) $(CHARTMUSEUM_SRC_TAG) $(CHART_SERVER_MAIN_PATH) $(CHART_SERVER_BIN_NAME) && cd - ; \ fi ; \ echo "building chartmuseum container for photon..." ; \ $(DOCKERBUILD) --build-arg harbor_base_image_version=$(BASEIMAGETAG) --build-arg harbor_base_namespace=$(BASEIMAGENAMESPACE) -f $(DOCKERFILEPATH_CHART_SERVER)/$(DOCKERFILENAME_CHART_SERVER) -t $(DOCKERIMAGENAME_CHART_SERVER):$(VERSIONTAG) . 
; \ diff --git a/make/photon/chartserver/builder b/make/photon/chartserver/builder index 52d2c5e77..f5e671bf5 100755 --- a/make/photon/chartserver/builder +++ b/make/photon/chartserver/builder @@ -26,6 +26,7 @@ cur=$PWD mkdir -p binary rm -rf binary/$BIN_NAME || true cp compile.sh binary/ +cp *.patch binary/ docker run -it --rm -v $cur/binary:/go/bin --name golang_code_builder $GOLANG_IMAGE /bin/bash /go/bin/compile.sh $GIT_PATH $CODE_VERSION $MAIN_GO_PATH $BIN_NAME diff --git a/make/photon/chartserver/compile.sh b/make/photon/chartserver/compile.sh index 4634c6d15..6eb2bd5ef 100644 --- a/make/photon/chartserver/compile.sh +++ b/make/photon/chartserver/compile.sh @@ -26,6 +26,11 @@ set -e cd $SRC_PATH git checkout tags/$VERSION -b $VERSION +#Patch +for p in $(ls /go/bin/*.patch); do + git apply $p || exit /b 1 +done + #Compile cd $SRC_PATH/$MAIN_GO_PATH && go build -a -o $BIN_NAME mv $BIN_NAME /go/bin/ diff --git a/make/photon/chartserver/redis.patch b/make/photon/chartserver/redis.patch new file mode 100644 index 000000000..ba95873e4 --- /dev/null +++ b/make/photon/chartserver/redis.patch @@ -0,0 +1,79 @@ +diff --git a/cmd/chartmuseum/main.go b/cmd/chartmuseum/main.go +index e2d8ec0..116b1d4 100644 +--- a/cmd/chartmuseum/main.go ++++ b/cmd/chartmuseum/main.go +@@ -264,6 +264,8 @@ func storeFromConfig(conf *config.Config) cache.Store { + switch cacheFlag { + case "redis": + store = redisCacheFromConfig(conf) ++ case "redis_sentinel": ++ store = redisSentinelCacheFromConfig(conf) + default: + crash("Unsupported cache store: ", cacheFlag) + } +@@ -280,6 +282,16 @@ func redisCacheFromConfig(conf *config.Config) cache.Store { + )) + } + ++func redisSentinelCacheFromConfig(conf *config.Config) cache.Store { ++ crashIfConfigMissingVars(conf, []string{"cache.redis.addr", "cache.redis.mastername"}) ++ return cache.Store(cache.NewRedisSentinelStore( ++ conf.GetString("cache.redis.mastername"), ++ strings.Split(conf.GetString("cache.redis.addr"), ","), ++ conf.GetString("cache.redis.password"), ++ conf.GetInt("cache.redis.db"), ++ )) ++} ++ + func crashIfConfigMissingVars(conf *config.Config, vars []string) { + missing := []string{} + for _, v := range vars { +diff --git a/pkg/cache/redis_sentinel.go b/pkg/cache/redis_sentinel.go +new file mode 100644 +index 0000000..0c73427 +--- /dev/null ++++ b/pkg/cache/redis_sentinel.go +@@ -0,0 +1,18 @@ ++package cache ++ ++import ( ++ "github.com/go-redis/redis" ++) ++ ++// NewRedisStore creates a new RedisStore ++func NewRedisSentinelStore(masterName string, sentinelAddrs []string, password string, db int) *RedisStore { ++ store := &RedisStore{} ++ redisClientOptions := &redis.FailoverOptions{ ++ MasterName: masterName, ++ SentinelAddrs: sentinelAddrs, ++ Password: password, ++ DB: db, ++ } ++ store.Client = redis.NewFailoverClient(redisClientOptions) ++ return store ++} +diff --git a/pkg/config/vars.go b/pkg/config/vars.go +index 2b30ec4..603eebc 100644 +--- a/pkg/config/vars.go ++++ b/pkg/config/vars.go +@@ -237,10 +237,19 @@ var configVars = map[string]configVar{ + Default: "", + CLIFlag: cli.StringFlag{ + Name: "cache-redis-addr", +- Usage: "address of Redis service (host:port)", ++ Usage: "address of Redis service (host:port), addresses of Redis+Sentinel service (host1:port1,host2:port2)", + EnvVar: "CACHE_REDIS_ADDR", + }, + }, ++ "cache.redis.mastername": { ++ Type: stringType, ++ Default: "", ++ CLIFlag: cli.StringFlag{ ++ Name: "cache-redis-mastername", ++ Usage: "address of Redis+Sentinel mastername", ++ EnvVar: "CACHE_REDIS_MASTERNAME", ++ }, ++ }, 
+ "cache.redis.password": { + Type: stringType, + Default: "", diff --git a/make/photon/prepare/migrations/version_2_0_0/harbor.yml.jinja b/make/photon/prepare/migrations/version_2_0_0/harbor.yml.jinja index 44f859c53..19ba39a66 100644 --- a/make/photon/prepare/migrations/version_2_0_0/harbor.yml.jinja +++ b/make/photon/prepare/migrations/version_2_0_0/harbor.yml.jinja @@ -306,9 +306,14 @@ external_database: {% if external_redis is defined %} external_redis: - host: {{ external_redis.host }} - port: {{ external_redis.port }} + # support redis, redis+sentinel + # host for redis: : + # host for redis+sentinel: + # :,:,: + host: {{ external_redis.host }}:{{ external_redis.port }} password: {{ external_redis.password }} + # sentinel_master_set must be set to support redis+sentinel + #sentinel_master_set: # db_index 0 is for core, it's unchangeable registry_db_index: {{ external_redis.registry_db_index }} jobservice_db_index: {{ external_redis.jobservice_db_index }} @@ -319,9 +324,14 @@ external_redis: {% else %} # Umcomments external_redis if using external Redis server # external_redis: -# host: redis -# port: 6379 +# # support redis, redis+sentinel +# # host for redis: : +# # host for redis+sentinel: +# # :,:,: +# host: redis:6379 # password: +# # sentinel_master_set must be set to support redis+sentinel +# #sentinel_master_set: # # db_index 0 is for core, it's unchangeable # registry_db_index: 1 # jobservice_db_index: 2 diff --git a/make/photon/prepare/templates/chartserver/env.jinja b/make/photon/prepare/templates/chartserver/env.jinja index 84bede172..fb4d75c43 100644 --- a/make/photon/prepare/templates/chartserver/env.jinja +++ b/make/photon/prepare/templates/chartserver/env.jinja @@ -11,9 +11,16 @@ PORT=9999 # Only support redis now. If redis is setup, then enable cache CACHE={{cache_store}} +{% if cache_redis_mastername %} +CACHE_REDIS_ADDR={{cache_redis_addr}} +CACHE_REDIS_MASTERNAME={{cache_redis_mastername}} +CACHE_REDIS_PASSWORD={{cache_redis_password}} +CACHE_REDIS_DB={{cache_redis_db_index}} +{% else %} CACHE_REDIS_ADDR={{cache_redis_addr}} CACHE_REDIS_PASSWORD={{cache_redis_password}} CACHE_REDIS_DB={{cache_redis_db_index}} +{% endif %} # Credential for internal communication BASIC_AUTH_USER=chart_controller diff --git a/make/photon/prepare/templates/core/env.jinja b/make/photon/prepare/templates/core/env.jinja index 1d4767359..13062a2cd 100644 --- a/make/photon/prepare/templates/core/env.jinja +++ b/make/photon/prepare/templates/core/env.jinja @@ -1,6 +1,6 @@ CONFIG_PATH=/etc/core/app.conf UAA_CA_ROOT=/etc/core/certificates/uaa_ca.pem -_REDIS_URL={{redis_host}}:{{redis_port}},100,{{redis_password}},0,{{redis_idle_timeout_seconds}} +_REDIS_URL_CORE={{redis_url_core}} SYNC_QUOTA=true CHART_CACHE_DRIVER={{chart_cache_driver}} _REDIS_URL_REG={{redis_url_reg}} diff --git a/make/photon/prepare/templates/registry/config.yml.jinja b/make/photon/prepare/templates/registry/config.yml.jinja index 9444726e3..b7d34cdbd 100644 --- a/make/photon/prepare/templates/registry/config.yml.jinja +++ b/make/photon/prepare/templates/registry/config.yml.jinja @@ -17,7 +17,13 @@ storage: disable: true {% endif %} redis: - addr: {{redis_host}}:{{redis_port}} +{% if sentinel_master_set %} + # sentinel hosts with comma + addr: {{redis_host}} + sentinelMasterSet: {{sentinel_master_set}} +{% else %} + addr: {{redis_host}} +{% endif %} password: {{redis_password}} db: {{redis_db_index_reg}} http: diff --git a/make/photon/prepare/templates/trivy-adapter/env.jinja 
b/make/photon/prepare/templates/trivy-adapter/env.jinja index b10eecbe4..da6b1cae6 100644 --- a/make/photon/prepare/templates/trivy-adapter/env.jinja +++ b/make/photon/prepare/templates/trivy-adapter/env.jinja @@ -1,4 +1,5 @@ SCANNER_LOG_LEVEL={{log_level}} +SCANNER_REDIS_URL={{trivy_redis_url}} SCANNER_STORE_REDIS_URL={{trivy_redis_url}} SCANNER_STORE_REDIS_NAMESPACE=harbor.scanner.trivy:store SCANNER_JOB_QUEUE_REDIS_URL={{trivy_redis_url}} diff --git a/make/photon/prepare/utils/chart.py b/make/photon/prepare/utils/chart.py index 89518e4d6..abcef200d 100644 --- a/make/photon/prepare/utils/chart.py +++ b/make/photon/prepare/utils/chart.py @@ -1,6 +1,7 @@ -import os, shutil - +import os +from urllib.parse import urlsplit from g import templates_dir, config_dir, data_dir, DEFAULT_UID, DEFAULT_GID + from .jinja import render_jinja from .misc import prepare_dir @@ -12,12 +13,29 @@ chart_museum_env = os.path.join(config_dir, "chartserver", "env") chart_museum_data_dir = os.path.join(data_dir, 'chart_storage') -def prepare_chartmuseum(config_dict): - redis_host = config_dict['redis_host'] - redis_port = config_dict['redis_port'] - redis_password = config_dict['redis_password'] - redis_db_index_chart = config_dict['redis_db_index_chart'] +def parse_redis(redis_url_chart): + u = urlsplit(redis_url_chart) + if not u.scheme or u.scheme == 'redis': + return { + 'cache_store': 'redis', + 'cache_redis_addr': u.netloc.split('@')[-1], + 'cache_redis_password': u.password or '', + 'cache_redis_db_index': u.path and int(u.path[1:]) or 0, + } + elif u.scheme == 'redis+sentinel': + return { + 'cache_store': 'redis_sentinel', + 'cache_redis_mastername': u.path.split('/')[1], + 'cache_redis_addr': u.netloc.split('@')[-1], + 'cache_redis_password': u.password or '', + 'cache_redis_db_index': len(u.path.split('/')) == 3 and int(u.path.split('/')[2]) or 0, + } + else: + raise Exception('bad redis url for chart:' + redis_url_chart) + + +def prepare_chartmuseum(config_dict): storage_provider_name = config_dict['storage_provider_name'] storage_provider_config_map = config_dict['storage_provider_config'] @@ -25,10 +43,7 @@ def prepare_chartmuseum(config_dict): prepare_dir(chart_museum_config_dir) # process redis info - cache_store = "redis" - cache_redis_password = redis_password - cache_redis_addr = "{}:{}".format(redis_host, redis_port) - cache_redis_db_index = redis_db_index_chart + cache_redis_ops = parse_redis(config_dict['redis_url_chart']) # process storage info @@ -85,8 +100,10 @@ def prepare_chartmuseum(config_dict): storage_provider_config_options.append("STORAGE_ALIBABA_BUCKET=%s" % bucket ) storage_provider_config_options.append("STORAGE_ALIBABA_ENDPOINT=%s" % endpoint ) storage_provider_config_options.append("STORAGE_ALIBABA_PREFIX=%s" % ( storage_provider_config_map.get("rootdirectory") or '') ) - storage_provider_config_options.append("ALIBABA_CLOUD_ACCESS_KEY_ID=%s" % ( storage_provider_config_map.get("accesskeyid") or '') ) - storage_provider_config_options.append("ALIBABA_CLOUD_ACCESS_KEY_SECRET=%s" % ( storage_provider_config_map.get("accesskeysecret") or '') ) + storage_provider_config_options.append( + "ALIBABA_CLOUD_ACCESS_KEY_ID=%s" % (storage_provider_config_map.get("accesskeyid") or '')) + storage_provider_config_options.append( + "ALIBABA_CLOUD_ACCESS_KEY_SECRET=%s" % (storage_provider_config_map.get("accesskeysecret") or '')) else: # use local file system storage_provider_config_options.append("STORAGE_LOCAL_ROOTDIR=/chart_storage") @@ -95,15 +112,11 @@ def prepare_chartmuseum(config_dict): 
all_storage_provider_configs = ('\n').join(storage_provider_config_options) render_jinja( - chart_museum_env_temp, - chart_museum_env, - cache_store=cache_store, - cache_redis_addr=cache_redis_addr, - cache_redis_password=cache_redis_password, - cache_redis_db_index=cache_redis_db_index, - core_secret=config_dict['core_secret'], - storage_driver=storage_driver, - all_storage_driver_configs=all_storage_provider_configs, - public_url=config_dict['public_url'], - chart_absolute_url=config_dict['chart_absolute_url'], - internal_tls=config_dict['internal_tls']) \ No newline at end of file + chart_museum_env_temp, + chart_museum_env, + storage_driver=storage_driver, + all_storage_driver_configs=all_storage_provider_configs, + public_url=config_dict['public_url'], + chart_absolute_url=config_dict['chart_absolute_url'], + internal_tls=config_dict['internal_tls'], + **cache_redis_ops) diff --git a/make/photon/prepare/utils/configs.py b/make/photon/prepare/utils/configs.py index e32596b1f..0084ee468 100644 --- a/make/photon/prepare/utils/configs.py +++ b/make/photon/prepare/utils/configs.py @@ -1,9 +1,9 @@ +import logging import os import yaml -import logging - -from models import InternalTLS +from urllib.parse import urlencode from g import versions_file_path, host_root_dir, DEFAULT_UID, INTERNAL_NO_PROXY_DN +from models import InternalTLS from utils.misc import generate_random_string, owner_can_read, other_can_read default_db_max_idle_conns = 2 # NOTE: https://golang.org/pkg/database/sql/#DB.SetMaxIdleConns @@ -73,17 +73,6 @@ def validate(conf: dict, **kwargs): if uid != DEFAULT_UID and not other_can_read(st_mode): raise Exception(err_msg) - # Redis validate - redis_host = conf.get("redis_host") - if redis_host is None or len(redis_host) < 1: - raise Exception( - "Error: redis_host in harbor.yml needs to point to an endpoint of Redis server or cluster.") - - redis_port = conf.get("redis_port") - if redis_host is None or (redis_port < 1 or redis_port > 65535): - raise Exception( - "Error: redis_port in harbor.yml needs to point to the port of Redis server or cluster.") - # TODO: # If user enable trust cert dir, need check if the files in this dir is readable. 
@@ -372,24 +361,38 @@ def parse_yaml_config(config_file_path, with_notary, with_clair, with_trivy, wit def get_redis_url(db, redis=None): - """Returns redis url with format `redis://[arbitrary_username:password@]ipaddress:port/database_index` + """Returns redis url with format `redis://[arbitrary_username:password@]ipaddress:port/database_index?idle_timeout_seconds=30` >>> get_redis_url(1) 'redis://redis:6379/1' - >>> get_redis_url(1, {'host': 'localhost', 'password': 'password'}) + >>> get_redis_url(1, {'host': 'localhost:6379', 'password': 'password'}) 'redis://anonymous:password@localhost:6379/1' + >>> get_redis_url(1, {'host':'host1:26379,host2:26379', 'sentinel_master_set':'mymaster', 'password':'password1'}) + 'redis+sentinel://anonymous:password@host1:26379,host2:26379/mymaster/1' + >>> get_redis_url(1, {'host':'host1:26379,host2:26379', 'sentinel_master_set':'mymaster', 'password':'password1','idle_timeout_seconds':30}) + 'redis+sentinel://anonymous:password@host1:26379,host2:26379/mymaster/1?idle_timeout_seconds=30' + """ kwargs = { - 'host': 'redis', - 'port': 6379, + 'host': 'redis:6379', 'password': '', } kwargs.update(redis or {}) - kwargs['db'] = db + kwargs['scheme'] = kwargs.get('sentinel_master_set', None) and 'redis+sentinel' or 'redis' + kwargs['db_part'] = db and ("/%s" % db) or "" + kwargs['sentinel_part'] = kwargs.get('sentinel_master_set', None) and ("/" + kwargs['sentinel_master_set']) or '' + kwargs['password_part'] = kwargs.get('password', None) and (':%s@' % kwargs['password']) or '' - if kwargs['password']: - return "redis://anonymous:{password}@{host}:{port}/{db}".format(**kwargs) - return "redis://{host}:{port}/{db}".format(**kwargs) + return "{scheme}://{password_part}{host}{sentinel_part}{db_part}".format(**kwargs) + get_redis_url_param(kwargs) + + +def get_redis_url_param(redis=None): + params = {} + if redis and 'idle_timeout_seconds' in redis: + params['idle_timeout_seconds'] = redis['idle_timeout_seconds'] + if params: + return "?" 
+ urlencode(params) + return "" def get_redis_configs(external_redis=None, with_clair=True, with_trivy=True): @@ -437,8 +440,7 @@ def get_redis_configs(external_redis=None, with_clair=True, with_trivy=True): # internal redis config as the default redis = { - 'host': 'redis', - 'port': 6379, + 'host': 'redis:6379', 'password': '', 'registry_db_index': 1, 'jobservice_db_index': 2, @@ -451,23 +453,15 @@ def get_redis_configs(external_redis=None, with_clair=True, with_trivy=True): # overwriting existing keys by external_redis redis.update({key: value for (key, value) in external_redis.items() if value}) - configs['redis_host'] = redis['host'] - configs['redis_port'] = redis['port'] - configs['redis_password'] = redis['password'] - configs['redis_db_index_reg'] = redis['registry_db_index'] - configs['redis_db_index_js'] = redis['jobservice_db_index'] - configs['redis_db_index_chart'] = redis['chartmuseum_db_index'] - configs['redis_idle_timeout_seconds'] = redis['idle_timeout_seconds'] - - configs['redis_url_js'] = get_redis_url(configs['redis_db_index_js'], redis) - configs['redis_url_reg'] = get_redis_url(configs['redis_db_index_reg'], redis) + configs['redis_url_core'] = get_redis_url(0, redis) + configs['redis_url_chart'] = get_redis_url(redis['chartmuseum_db_index'], redis) + configs['redis_url_js'] = get_redis_url(redis['jobservice_db_index'], redis) + configs['redis_url_reg'] = get_redis_url(redis['registry_db_index'], redis) if with_clair: - configs['redis_db_index_clair'] = redis['clair_db_index'] - configs['redis_url_clair'] = get_redis_url(configs['redis_db_index_clair'], redis) + configs['redis_url_clair'] = get_redis_url(redis['clair_db_index'], redis) if with_trivy: - configs['redis_db_index_trivy'] = redis['trivy_db_index'] - configs['trivy_redis_url'] = get_redis_url(configs['redis_db_index_trivy'], redis) + configs['trivy_redis_url'] = get_redis_url(redis['trivy_db_index'], redis) return configs diff --git a/make/photon/prepare/utils/core.py b/make/photon/prepare/utils/core.py index 43a1f29c0..5836cc0be 100644 --- a/make/photon/prepare/utils/core.py +++ b/make/photon/prepare/utils/core.py @@ -1,8 +1,8 @@ -import shutil, os - +import os +import shutil from g import config_dir, templates_dir, data_dir, DEFAULT_GID, DEFAULT_UID -from utils.misc import prepare_dir, generate_random_string from utils.jinja import render_jinja +from utils.misc import prepare_dir, generate_random_string core_config_dir = os.path.join(config_dir, "core", "certificates") core_env_template_path = os.path.join(templates_dir, "core", "env.jinja") @@ -19,7 +19,7 @@ def prepare_core(config_dict, with_notary, with_clair, with_trivy, with_chartmus # Render Core # set cache for chart repo server # default set 'memory' mode, if redis is configured then set to 'redis' - if len(config_dict['redis_host']) > 0: + if len(config_dict['redis_url_core']) > 0: chart_cache_driver = "redis" else: chart_cache_driver = "memory" diff --git a/make/photon/prepare/utils/misc.py b/make/photon/prepare/utils/misc.py index 65de60168..c40ae9327 100644 --- a/make/photon/prepare/utils/misc.py +++ b/make/photon/prepare/utils/misc.py @@ -1,10 +1,12 @@ -import os, string, sys +import os import secrets +import string +import sys from pathlib import Path from functools import wraps - from g import DEFAULT_UID, DEFAULT_GID, host_root_dir + # To meet security requirement # By default it will change file mode to 0600, and make the owner of the file to 10000:10000 def mark_file(path, mode=0o600, uid=DEFAULT_UID, gid=DEFAULT_GID): @@ -52,22 +54,6 
@@ def validate(conf, **kwargs): raise Exception( "Error: no provider configurations are provided for provider %s" % storage_provider_name) - # Redis validate - redis_host = conf.get("configuration", "redis_host") - if redis_host is None or len(redis_host) < 1: - raise Exception( - "Error: redis_host in harbor.yml needs to point to an endpoint of Redis server or cluster.") - - redis_port = conf.get("configuration", "redis_port") - if len(redis_port) < 1: - raise Exception( - "Error: redis_port in harbor.yml needs to point to the port of Redis server or cluster.") - - redis_db_index = conf.get("configuration", "redis_db_index").strip() - if len(redis_db_index.split(",")) != 3: - raise Exception( - "Error invalid value for redis_db_index: %s. please set it as 1,2,3" % redis_db_index) - def validate_crt_subj(dirty_subj): subj_list = [item for item in dirty_subj.strip().split("/") \ if len(item.split("=")) == 2 and len(item.split("=")[1]) > 0] diff --git a/make/photon/prepare/utils/registry.py b/make/photon/prepare/utils/registry.py index f4c86796a..79401a947 100644 --- a/make/photon/prepare/utils/registry.py +++ b/make/photon/prepare/utils/registry.py @@ -1,9 +1,10 @@ -import os, copy, subprocess - +import copy +import os +import subprocess from g import config_dir, templates_dir, DEFAULT_GID, DEFAULT_UID, data_dir -from utils.misc import prepare_dir +from urllib.parse import urlsplit from utils.jinja import render_jinja - +from utils.misc import prepare_dir registry_config_dir = os.path.join(config_dir, "registry") registry_config_template_path = os.path.join(templates_dir, "registry", "config.yml.jinja") @@ -26,8 +27,11 @@ def prepare_registry(config_dict): gen_passwd_file(config_dict) storage_provider_info = get_storage_provider_info( - config_dict['storage_provider_name'], - config_dict['storage_provider_config']) + config_dict['storage_provider_name'], + config_dict['storage_provider_config']) + + # process redis info + redis_ops = parse_redis(config_dict['redis_url_reg']) render_jinja( registry_config_template_path, @@ -36,9 +40,27 @@ def prepare_registry(config_dict): gid=DEFAULT_GID, level=levels_map[config_dict['log_level']], storage_provider_info=storage_provider_info, - **config_dict) + **config_dict, **redis_ops) +def parse_redis(redis_url): + u = urlsplit(redis_url) + if not u.scheme or u.scheme == 'redis': + return { + 'redis_host': u.netloc.split('@')[-1], + 'redis_password': u.password or '', + 'redis_db_index_reg': u.path and int(u.path[1:]) or 0, + } + elif u.scheme == 'redis+sentinel': + return { + 'sentinel_master_set': u.path.split('/')[1], + 'redis_host': u.netloc.split('@')[-1], + 'redis_password': u.password or '', + 'redis_db_index_reg': len(u.path.split('/')) == 3 and int(u.path.split('/')[2]) or 0, + } + else: + raise Exception('bad redis url for registry:' + redis_url) + def get_storage_provider_info(provider_name, provider_config): provider_config_copy = copy.deepcopy(provider_config) if provider_name == "filesystem": diff --git a/make/photon/registry/builder b/make/photon/registry/builder index 2a0333875..8e7c603c1 100755 --- a/make/photon/registry/builder +++ b/make/photon/registry/builder @@ -27,6 +27,7 @@ echo 'add patch https://github.com/docker/distribution/pull/2879 ...' cd $TEMP wget https://github.com/docker/distribution/pull/2879.patch git apply 2879.patch +git apply $cur/redis.patch cd $cur echo 'build the registry binary ...' 
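Between get_redis_url()/get_redis_url_param() in configs.py and the parse_redis() helpers added to chart.py and registry.py, the prepare step now speaks a single URL grammar: redis://[:password@]host:port[/db][?idle_timeout_seconds=N] for a standalone server, and redis+sentinel://[:password@]host1:port,host2:port/master_set[/db][?idle_timeout_seconds=N] for a sentinel-backed deployment. A rough standalone builder, assuming Python 3 and an illustrative function name rather than the patch's own code, shows the strings handed to the core, registry, jobservice, chartmuseum, clair and trivy templates:

from urllib.parse import urlencode

def build_redis_url(db, host='redis:6379', password='', sentinel_master_set=None, **params):
    # 'redis' for a single server, 'redis+sentinel' when a master set name is given
    scheme = 'redis+sentinel' if sentinel_master_set else 'redis'
    auth = ':%s@' % password if password else ''
    master = '/' + sentinel_master_set if sentinel_master_set else ''
    db_part = '/%s' % db if db else ''
    query = '?' + urlencode(params) if params else ''   # e.g. idle_timeout_seconds=30
    return '{}://{}{}{}{}{}'.format(scheme, auth, host, master, db_part, query)

print(build_redis_url(1))
# redis://redis:6379/1
print(build_redis_url(1, host='localhost:6379', password='s3cret'))
# redis://:s3cret@localhost:6379/1
print(build_redis_url(1, host='host1:26379,host2:26379',
                      sentinel_master_set='mymaster', password='s3cret',
                      idle_timeout_seconds=30))
# redis+sentinel://:s3cret@host1:26379,host2:26379/mymaster/1?idle_timeout_seconds=30

The redis.patch to docker/distribution below covers the registry side of the sentinel variant: it adds a sentinelMasterSet option and resolves the current master through the vendored FZambia/sentinel client before dialling.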
diff --git a/make/photon/registry/redis.patch b/make/photon/registry/redis.patch new file mode 100644 index 000000000..ee6924115 --- /dev/null +++ b/make/photon/registry/redis.patch @@ -0,0 +1,901 @@ +diff --git a/configuration/configuration.go b/configuration/configuration.go +index b347d63b..04cdd230 100644 +--- a/configuration/configuration.go ++++ b/configuration/configuration.go +@@ -162,6 +162,9 @@ type Configuration struct { + // Addr specifies the the redis instance available to the application. + Addr string `yaml:"addr,omitempty"` + ++ // SentinelMasterSet specifies the the redis sentinel master set name. ++ SentinelMasterSet string `yaml:"sentinelMasterSet,omitempty"` ++ + // Password string to use when making a connection. + Password string `yaml:"password,omitempty"` + +diff --git a/registry/handlers/app.go b/registry/handlers/app.go +index 978851bb..a8379071 100644 +--- a/registry/handlers/app.go ++++ b/registry/handlers/app.go +@@ -3,6 +3,7 @@ package handlers + import ( + "context" + cryptorand "crypto/rand" ++ "errors" + "expvar" + "fmt" + "math/rand" +@@ -15,6 +16,8 @@ import ( + "strings" + "time" + ++ "github.com/FZambia/sentinel" ++ + "github.com/docker/distribution" + "github.com/docker/distribution/configuration" + dcontext "github.com/docker/distribution/context" +@@ -24,7 +27,7 @@ import ( + "github.com/docker/distribution/notifications" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" +- "github.com/docker/distribution/registry/api/v2" ++ v2 "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/auth" + registrymiddleware "github.com/docker/distribution/registry/middleware/registry" + repositorymiddleware "github.com/docker/distribution/registry/middleware/repository" +@@ -498,6 +501,44 @@ func (app *App) configureRedis(configuration *configuration.Configuration) { + return + } + ++ var getRedisAddr func() (string, error) ++ var testOnBorrow func(c redis.Conn, t time.Time) error ++ if configuration.Redis.SentinelMasterSet != "" { ++ sntnl := &sentinel.Sentinel{ ++ Addrs: strings.Split(configuration.Redis.Addr, ","), ++ MasterName: configuration.Redis.SentinelMasterSet, ++ Dial: func(addr string) (redis.Conn, error) { ++ c, err := redis.DialTimeout("tcp", addr, ++ configuration.Redis.DialTimeout, ++ configuration.Redis.ReadTimeout, ++ configuration.Redis.WriteTimeout) ++ if err != nil { ++ return nil, err ++ } ++ return c, nil ++ }, ++ } ++ getRedisAddr = func() (string, error) { ++ return sntnl.MasterAddr() ++ } ++ testOnBorrow = func(c redis.Conn, t time.Time) error { ++ if !sentinel.TestRole(c, "master") { ++ return errors.New("role check failed") ++ } ++ return nil ++ } ++ ++ } else { ++ getRedisAddr = func() (string, error) { ++ return configuration.Redis.Addr, nil ++ } ++ testOnBorrow = func(c redis.Conn, t time.Time) error { ++ // TODO(stevvooe): We can probably do something more interesting ++ // here with the health package. ++ _, err := c.Do("PING") ++ return err ++ } ++ } + pool := &redis.Pool{ + Dial: func() (redis.Conn, error) { + // TODO(stevvooe): Yet another use case for contextual timing. 
+@@ -513,8 +554,11 @@ func (app *App) configureRedis(configuration *configuration.Configuration) { + } + } + +- conn, err := redis.DialTimeout("tcp", +- configuration.Redis.Addr, ++ redisAddr, err := getRedisAddr() ++ if err != nil { ++ return nil, err ++ } ++ conn, err := redis.DialTimeout("tcp", redisAddr, + configuration.Redis.DialTimeout, + configuration.Redis.ReadTimeout, + configuration.Redis.WriteTimeout) +@@ -546,16 +590,11 @@ func (app *App) configureRedis(configuration *configuration.Configuration) { + done(nil) + return conn, nil + }, +- MaxIdle: configuration.Redis.Pool.MaxIdle, +- MaxActive: configuration.Redis.Pool.MaxActive, +- IdleTimeout: configuration.Redis.Pool.IdleTimeout, +- TestOnBorrow: func(c redis.Conn, t time.Time) error { +- // TODO(stevvooe): We can probably do something more interesting +- // here with the health package. +- _, err := c.Do("PING") +- return err +- }, +- Wait: false, // if a connection is not available, proceed without cache. ++ MaxIdle: configuration.Redis.Pool.MaxIdle, ++ MaxActive: configuration.Redis.Pool.MaxActive, ++ IdleTimeout: configuration.Redis.Pool.IdleTimeout, ++ TestOnBorrow: testOnBorrow, ++ Wait: false, // if a connection is not available, proceed without cache. + } + + app.redis = pool +diff --git a/registry/handlers/app_test.go b/registry/handlers/app_test.go +index 12c0b61c..8a644d83 100644 +--- a/registry/handlers/app_test.go ++++ b/registry/handlers/app_test.go +@@ -11,7 +11,7 @@ import ( + "github.com/docker/distribution/configuration" + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/api/errcode" +- "github.com/docker/distribution/registry/api/v2" ++ v2 "github.com/docker/distribution/registry/api/v2" + "github.com/docker/distribution/registry/auth" + _ "github.com/docker/distribution/registry/auth/silly" + "github.com/docker/distribution/registry/storage" +@@ -140,7 +140,29 @@ func TestAppDispatcher(t *testing.T) { + // TestNewApp covers the creation of an application via NewApp with a + // configuration. + func TestNewApp(t *testing.T) { +- ctx := context.Background() ++ ++ config := configuration.Configuration{ ++ Storage: configuration.Storage{ ++ "testdriver": nil, ++ "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{ ++ "enabled": false, ++ }}, ++ }, ++ Auth: configuration.Auth{ ++ // For now, we simply test that new auth results in a viable ++ // application. ++ "silly": { ++ "realm": "realm-test", ++ "service": "service-test", ++ }, ++ }, ++ } ++ runAppWithConfig(t, config) ++} ++ ++// TestNewApp covers the creation of an application via NewApp with a ++// configuration(with redis). ++func TestNewAppWithRedis(t *testing.T) { + config := configuration.Configuration{ + Storage: configuration.Storage{ + "testdriver": nil, +@@ -157,7 +179,38 @@ func TestNewApp(t *testing.T) { + }, + }, + } ++ config.Redis.Addr = "127.0.0.1:6379" ++ config.Redis.DB = 0 ++ runAppWithConfig(t, config) ++} + ++// TestNewApp covers the creation of an application via NewApp with a ++// configuration(with redis sentinel cluster). ++func TestNewAppWithRedisSentinelCluster(t *testing.T) { ++ config := configuration.Configuration{ ++ Storage: configuration.Storage{ ++ "testdriver": nil, ++ "maintenance": configuration.Parameters{"uploadpurging": map[interface{}]interface{}{ ++ "enabled": false, ++ }}, ++ }, ++ Auth: configuration.Auth{ ++ // For now, we simply test that new auth results in a viable ++ // application. 
++ "silly": { ++ "realm": "realm-test", ++ "service": "service-test", ++ }, ++ }, ++ } ++ config.Redis.Addr = "192.168.0.11:26379,192.168.0.12:26379" ++ config.Redis.DB = 0 ++ config.Redis.SentinelMasterSet = "mymaster" ++ runAppWithConfig(t, config) ++} ++ ++func runAppWithConfig(t *testing.T, config configuration.Configuration) { ++ ctx := context.Background() + // Mostly, with this test, given a sane configuration, we are simply + // ensuring that NewApp doesn't panic. We might want to tweak this + // behavior. +diff --git a/vendor.conf b/vendor.conf +index a249caf2..fcc9fee2 100644 +--- a/vendor.conf ++++ b/vendor.conf +@@ -49,3 +49,4 @@ gopkg.in/yaml.v2 v2.2.1 + rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git + github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb + github.com/opencontainers/image-spec ab7389ef9f50030c9b245bc16b981c7ddf192882 ++github.com/FZambia/sentinel 5585739eb4b6478aa30161866ccf9ce0ef5847c7 https://github.com/jeremyxu2010/sentinel.git +diff --git a/vendor/github.com/FZambia/sentinel/LICENSE b/vendor/github.com/FZambia/sentinel/LICENSE +new file mode 100644 +index 00000000..8dada3ed +--- /dev/null ++++ b/vendor/github.com/FZambia/sentinel/LICENSE +@@ -0,0 +1,201 @@ ++ Apache License ++ Version 2.0, January 2004 ++ http://www.apache.org/licenses/ ++ ++ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION ++ ++ 1. Definitions. ++ ++ "License" shall mean the terms and conditions for use, reproduction, ++ and distribution as defined by Sections 1 through 9 of this document. ++ ++ "Licensor" shall mean the copyright owner or entity authorized by ++ the copyright owner that is granting the License. ++ ++ "Legal Entity" shall mean the union of the acting entity and all ++ other entities that control, are controlled by, or are under common ++ control with that entity. For the purposes of this definition, ++ "control" means (i) the power, direct or indirect, to cause the ++ direction or management of such entity, whether by contract or ++ otherwise, or (ii) ownership of fifty percent (50%) or more of the ++ outstanding shares, or (iii) beneficial ownership of such entity. ++ ++ "You" (or "Your") shall mean an individual or Legal Entity ++ exercising permissions granted by this License. ++ ++ "Source" form shall mean the preferred form for making modifications, ++ including but not limited to software source code, documentation ++ source, and configuration files. ++ ++ "Object" form shall mean any form resulting from mechanical ++ transformation or translation of a Source form, including but ++ not limited to compiled object code, generated documentation, ++ and conversions to other media types. ++ ++ "Work" shall mean the work of authorship, whether in Source or ++ Object form, made available under the License, as indicated by a ++ copyright notice that is included in or attached to the work ++ (an example is provided in the Appendix below). ++ ++ "Derivative Works" shall mean any work, whether in Source or Object ++ form, that is based on (or derived from) the Work and for which the ++ editorial revisions, annotations, elaborations, or other modifications ++ represent, as a whole, an original work of authorship. For the purposes ++ of this License, Derivative Works shall not include works that remain ++ separable from, or merely link (or bind by name) to the interfaces of, ++ the Work and Derivative Works thereof. 
++ ++ "Contribution" shall mean any work of authorship, including ++ the original version of the Work and any modifications or additions ++ to that Work or Derivative Works thereof, that is intentionally ++ submitted to Licensor for inclusion in the Work by the copyright owner ++ or by an individual or Legal Entity authorized to submit on behalf of ++ the copyright owner. For the purposes of this definition, "submitted" ++ means any form of electronic, verbal, or written communication sent ++ to the Licensor or its representatives, including but not limited to ++ communication on electronic mailing lists, source code control systems, ++ and issue tracking systems that are managed by, or on behalf of, the ++ Licensor for the purpose of discussing and improving the Work, but ++ excluding communication that is conspicuously marked or otherwise ++ designated in writing by the copyright owner as "Not a Contribution." ++ ++ "Contributor" shall mean Licensor and any individual or Legal Entity ++ on behalf of whom a Contribution has been received by Licensor and ++ subsequently incorporated within the Work. ++ ++ 2. Grant of Copyright License. Subject to the terms and conditions of ++ this License, each Contributor hereby grants to You a perpetual, ++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable ++ copyright license to reproduce, prepare Derivative Works of, ++ publicly display, publicly perform, sublicense, and distribute the ++ Work and such Derivative Works in Source or Object form. ++ ++ 3. Grant of Patent License. Subject to the terms and conditions of ++ this License, each Contributor hereby grants to You a perpetual, ++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable ++ (except as stated in this section) patent license to make, have made, ++ use, offer to sell, sell, import, and otherwise transfer the Work, ++ where such license applies only to those patent claims licensable ++ by such Contributor that are necessarily infringed by their ++ Contribution(s) alone or by combination of their Contribution(s) ++ with the Work to which such Contribution(s) was submitted. If You ++ institute patent litigation against any entity (including a ++ cross-claim or counterclaim in a lawsuit) alleging that the Work ++ or a Contribution incorporated within the Work constitutes direct ++ or contributory patent infringement, then any patent licenses ++ granted to You under this License for that Work shall terminate ++ as of the date such litigation is filed. ++ ++ 4. Redistribution. 
You may reproduce and distribute copies of the ++ Work or Derivative Works thereof in any medium, with or without ++ modifications, and in Source or Object form, provided that You ++ meet the following conditions: ++ ++ (a) You must give any other recipients of the Work or ++ Derivative Works a copy of this License; and ++ ++ (b) You must cause any modified files to carry prominent notices ++ stating that You changed the files; and ++ ++ (c) You must retain, in the Source form of any Derivative Works ++ that You distribute, all copyright, patent, trademark, and ++ attribution notices from the Source form of the Work, ++ excluding those notices that do not pertain to any part of ++ the Derivative Works; and ++ ++ (d) If the Work includes a "NOTICE" text file as part of its ++ distribution, then any Derivative Works that You distribute must ++ include a readable copy of the attribution notices contained ++ within such NOTICE file, excluding those notices that do not ++ pertain to any part of the Derivative Works, in at least one ++ of the following places: within a NOTICE text file distributed ++ as part of the Derivative Works; within the Source form or ++ documentation, if provided along with the Derivative Works; or, ++ within a display generated by the Derivative Works, if and ++ wherever such third-party notices normally appear. The contents ++ of the NOTICE file are for informational purposes only and ++ do not modify the License. You may add Your own attribution ++ notices within Derivative Works that You distribute, alongside ++ or as an addendum to the NOTICE text from the Work, provided ++ that such additional attribution notices cannot be construed ++ as modifying the License. ++ ++ You may add Your own copyright statement to Your modifications and ++ may provide additional or different license terms and conditions ++ for use, reproduction, or distribution of Your modifications, or ++ for any such Derivative Works as a whole, provided Your use, ++ reproduction, and distribution of the Work otherwise complies with ++ the conditions stated in this License. ++ ++ 5. Submission of Contributions. Unless You explicitly state otherwise, ++ any Contribution intentionally submitted for inclusion in the Work ++ by You to the Licensor shall be under the terms and conditions of ++ this License, without any additional terms or conditions. ++ Notwithstanding the above, nothing herein shall supersede or modify ++ the terms of any separate license agreement you may have executed ++ with Licensor regarding such Contributions. ++ ++ 6. Trademarks. This License does not grant permission to use the trade ++ names, trademarks, service marks, or product names of the Licensor, ++ except as required for reasonable and customary use in describing the ++ origin of the Work and reproducing the content of the NOTICE file. ++ ++ 7. Disclaimer of Warranty. Unless required by applicable law or ++ agreed to in writing, Licensor provides the Work (and each ++ Contributor provides its Contributions) on an "AS IS" BASIS, ++ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or ++ implied, including, without limitation, any warranties or conditions ++ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A ++ PARTICULAR PURPOSE. You are solely responsible for determining the ++ appropriateness of using or redistributing the Work and assume any ++ risks associated with Your exercise of permissions under this License. ++ ++ 8. Limitation of Liability. 
In no event and under no legal theory, ++ whether in tort (including negligence), contract, or otherwise, ++ unless required by applicable law (such as deliberate and grossly ++ negligent acts) or agreed to in writing, shall any Contributor be ++ liable to You for damages, including any direct, indirect, special, ++ incidental, or consequential damages of any character arising as a ++ result of this License or out of the use or inability to use the ++ Work (including but not limited to damages for loss of goodwill, ++ work stoppage, computer failure or malfunction, or any and all ++ other commercial damages or losses), even if such Contributor ++ has been advised of the possibility of such damages. ++ ++ 9. Accepting Warranty or Additional Liability. While redistributing ++ the Work or Derivative Works thereof, You may choose to offer, ++ and charge a fee for, acceptance of support, warranty, indemnity, ++ or other liability obligations and/or rights consistent with this ++ License. However, in accepting such obligations, You may act only ++ on Your own behalf and on Your sole responsibility, not on behalf ++ of any other Contributor, and only if You agree to indemnify, ++ defend, and hold each Contributor harmless for any liability ++ incurred by, or claims asserted against, such Contributor by reason ++ of your accepting any such warranty or additional liability. ++ ++ END OF TERMS AND CONDITIONS ++ ++ APPENDIX: How to apply the Apache License to your work. ++ ++ To apply the Apache License to your work, attach the following ++ boilerplate notice, with the fields enclosed by brackets "{}" ++ replaced with your own identifying information. (Don't include ++ the brackets!) The text should be enclosed in the appropriate ++ comment syntax for the file format. We also recommend that a ++ file or class name and description of purpose be included on the ++ same "printed page" as the copyright notice for easier ++ identification within third-party archives. ++ ++ Copyright {yyyy} {name of copyright owner} ++ ++ Licensed under the Apache License, Version 2.0 (the "License"); ++ you may not use this file except in compliance with the License. ++ You may obtain a copy of the License at ++ ++ http://www.apache.org/licenses/LICENSE-2.0 ++ ++ Unless required by applicable law or agreed to in writing, software ++ distributed under the License is distributed on an "AS IS" BASIS, ++ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++ See the License for the specific language governing permissions and ++ limitations under the License. +diff --git a/vendor/github.com/FZambia/sentinel/README.md b/vendor/github.com/FZambia/sentinel/README.md +new file mode 100644 +index 00000000..f544c54e +--- /dev/null ++++ b/vendor/github.com/FZambia/sentinel/README.md +@@ -0,0 +1,39 @@ ++go-sentinel ++=========== ++ ++Redis Sentinel support for [redigo](https://github.com/gomodule/redigo) library. 
++ ++Documentation ++------------- ++ ++- [API Reference](http://godoc.org/github.com/FZambia/sentinel) ++ ++Alternative solution ++-------------------- ++ ++You can alternatively configure Haproxy between your application and Redis to proxy requests to Redis master instance if you only need HA: ++ ++``` ++listen redis ++ server redis-01 127.0.0.1:6380 check port 6380 check inter 2s weight 1 inter 2s downinter 5s rise 10 fall 2 ++ server redis-02 127.0.0.1:6381 check port 6381 check inter 2s weight 1 inter 2s downinter 5s rise 10 fall 2 backup ++ bind *:6379 ++ mode tcp ++ option tcpka ++ option tcplog ++ option tcp-check ++ tcp-check send PING\r\n ++ tcp-check expect string +PONG ++ tcp-check send info\ replication\r\n ++ tcp-check expect string role:master ++ tcp-check send QUIT\r\n ++ tcp-check expect string +OK ++ balance roundrobin ++``` ++ ++This way you don't need to use this library. ++ ++License ++------- ++ ++Library is available under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.html). +diff --git a/vendor/github.com/FZambia/sentinel/sentinel.go b/vendor/github.com/FZambia/sentinel/sentinel.go +new file mode 100644 +index 00000000..79209e9f +--- /dev/null ++++ b/vendor/github.com/FZambia/sentinel/sentinel.go +@@ -0,0 +1,426 @@ ++package sentinel ++ ++import ( ++ "errors" ++ "fmt" ++ "net" ++ "strings" ++ "sync" ++ "time" ++ ++ "github.com/garyburd/redigo/redis" ++) ++ ++// Sentinel provides a way to add high availability (HA) to Redis Pool using ++// preconfigured addresses of Sentinel servers and name of master which Sentinels ++// monitor. It works with Redis >= 2.8.12 (mostly because of ROLE command that ++// was introduced in that version, it's possible though to support old versions ++// using INFO command). ++// ++// Example of the simplest usage to contact master "mymaster": ++// ++// func newSentinelPool() *redis.Pool { ++// sntnl := &sentinel.Sentinel{ ++// Addrs: []string{":26379", ":26380", ":26381"}, ++// MasterName: "mymaster", ++// Dial: func(addr string) (redis.Conn, error) { ++// timeout := 500 * time.Millisecond ++// c, err := redis.DialTimeout("tcp", addr, timeout, timeout, timeout) ++// if err != nil { ++// return nil, err ++// } ++// return c, nil ++// }, ++// } ++// return &redis.Pool{ ++// MaxIdle: 3, ++// MaxActive: 64, ++// Wait: true, ++// IdleTimeout: 240 * time.Second, ++// Dial: func() (redis.Conn, error) { ++// masterAddr, err := sntnl.MasterAddr() ++// if err != nil { ++// return nil, err ++// } ++// c, err := redis.Dial("tcp", masterAddr) ++// if err != nil { ++// return nil, err ++// } ++// return c, nil ++// }, ++// TestOnBorrow: func(c redis.Conn, t time.Time) error { ++// if !sentinel.TestRole(c, "master") { ++// return errors.New("Role check failed") ++// } else { ++// return nil ++// } ++// }, ++// } ++// } ++type Sentinel struct { ++ // Addrs is a slice with known Sentinel addresses. ++ Addrs []string ++ ++ // MasterName is a name of Redis master Sentinel servers monitor. ++ MasterName string ++ ++ // Dial is a user supplied function to connect to Sentinel on given address. This ++ // address will be chosen from Addrs slice. ++ // Note that as per the redis-sentinel client guidelines, a timeout is mandatory ++ // while connecting to Sentinels, and should not be set to 0. ++ Dial func(addr string) (redis.Conn, error) ++ ++ // Pool is a user supplied function returning custom connection pool to Sentinel. 
++ // This can be useful to tune options if you are not satisfied with what default ++ // Sentinel pool offers. See defaultPool() method for default pool implementation. ++ // In most cases you only need to provide Dial function and let this be nil. ++ Pool func(addr string) *redis.Pool ++ ++ mu sync.RWMutex ++ pools map[string]*redis.Pool ++ addr string ++} ++ ++// NoSentinelsAvailable is returned when all sentinels in the list are exhausted ++// (or none configured), and contains the last error returned by Dial (which ++// may be nil) ++type NoSentinelsAvailable struct { ++ lastError error ++} ++ ++func (ns NoSentinelsAvailable) Error() string { ++ if ns.lastError != nil { ++ return fmt.Sprintf("redigo: no sentinels available; last error: %s", ns.lastError.Error()) ++ } ++ return fmt.Sprintf("redigo: no sentinels available") ++} ++ ++// putToTop puts Sentinel address to the top of address list - this means ++// that all next requests will use Sentinel on this address first. ++// ++// From Sentinel guidelines: ++// ++// The first Sentinel replying to the client request should be put at the ++// start of the list, so that at the next reconnection, we'll try first ++// the Sentinel that was reachable in the previous connection attempt, ++// minimizing latency. ++// ++// Lock must be held by caller. ++func (s *Sentinel) putToTop(addr string) { ++ addrs := s.Addrs ++ if addrs[0] == addr { ++ // Already on top. ++ return ++ } ++ newAddrs := []string{addr} ++ for _, a := range addrs { ++ if a == addr { ++ continue ++ } ++ newAddrs = append(newAddrs, a) ++ } ++ s.Addrs = newAddrs ++} ++ ++// putToBottom puts Sentinel address to the bottom of address list. ++// We call this method internally when see that some Sentinel failed to answer ++// on application request so next time we start with another one. ++// ++// Lock must be held by caller. ++func (s *Sentinel) putToBottom(addr string) { ++ addrs := s.Addrs ++ if addrs[len(addrs)-1] == addr { ++ // Already on bottom. ++ return ++ } ++ newAddrs := []string{} ++ for _, a := range addrs { ++ if a == addr { ++ continue ++ } ++ newAddrs = append(newAddrs, a) ++ } ++ newAddrs = append(newAddrs, addr) ++ s.Addrs = newAddrs ++} ++ ++// defaultPool returns a connection pool to one Sentinel. This allows ++// us to call concurrent requests to Sentinel using connection Do method. ++func (s *Sentinel) defaultPool(addr string) *redis.Pool { ++ return &redis.Pool{ ++ MaxIdle: 3, ++ MaxActive: 10, ++ Wait: true, ++ IdleTimeout: 240 * time.Second, ++ Dial: func() (redis.Conn, error) { ++ return s.Dial(addr) ++ }, ++ TestOnBorrow: func(c redis.Conn, t time.Time) error { ++ _, err := c.Do("PING") ++ return err ++ }, ++ } ++} ++ ++func (s *Sentinel) get(addr string) redis.Conn { ++ pool := s.poolForAddr(addr) ++ return pool.Get() ++} ++ ++func (s *Sentinel) poolForAddr(addr string) *redis.Pool { ++ s.mu.Lock() ++ if s.pools == nil { ++ s.pools = make(map[string]*redis.Pool) ++ } ++ pool, ok := s.pools[addr] ++ if ok { ++ s.mu.Unlock() ++ return pool ++ } ++ s.mu.Unlock() ++ newPool := s.newPool(addr) ++ s.mu.Lock() ++ p, ok := s.pools[addr] ++ if ok { ++ s.mu.Unlock() ++ return p ++ } ++ s.pools[addr] = newPool ++ s.mu.Unlock() ++ return newPool ++} ++ ++func (s *Sentinel) newPool(addr string) *redis.Pool { ++ if s.Pool != nil { ++ return s.Pool(addr) ++ } ++ return s.defaultPool(addr) ++} ++ ++// close connection pool to Sentinel. ++// Lock must be hold by caller. 
++func (s *Sentinel) close() { ++ if s.pools != nil { ++ for _, pool := range s.pools { ++ pool.Close() ++ } ++ } ++ s.pools = nil ++} ++ ++func (s *Sentinel) doUntilSuccess(f func(redis.Conn) (interface{}, error)) (interface{}, error) { ++ s.mu.RLock() ++ addrs := s.Addrs ++ s.mu.RUnlock() ++ ++ var lastErr error ++ ++ for _, addr := range addrs { ++ conn := s.get(addr) ++ reply, err := f(conn) ++ conn.Close() ++ if err != nil { ++ lastErr = err ++ s.mu.Lock() ++ pool, ok := s.pools[addr] ++ if ok { ++ pool.Close() ++ delete(s.pools, addr) ++ } ++ s.putToBottom(addr) ++ s.mu.Unlock() ++ continue ++ } ++ s.putToTop(addr) ++ return reply, nil ++ } ++ ++ return nil, NoSentinelsAvailable{lastError: lastErr} ++} ++ ++// MasterAddr returns an address of current Redis master instance. ++func (s *Sentinel) MasterAddr() (string, error) { ++ res, err := s.doUntilSuccess(func(c redis.Conn) (interface{}, error) { ++ return queryForMaster(c, s.MasterName) ++ }) ++ if err != nil { ++ return "", err ++ } ++ return res.(string), nil ++} ++ ++// SlaveAddrs returns a slice with known slave addresses of current master instance. ++func (s *Sentinel) SlaveAddrs() ([]string, error) { ++ res, err := s.doUntilSuccess(func(c redis.Conn) (interface{}, error) { ++ return queryForSlaveAddrs(c, s.MasterName) ++ }) ++ if err != nil { ++ return nil, err ++ } ++ return res.([]string), nil ++} ++ ++// Slave represents a Redis slave instance which is known by Sentinel. ++type Slave struct { ++ ip string ++ port string ++ flags string ++} ++ ++// Addr returns an address of slave. ++func (s *Slave) Addr() string { ++ return net.JoinHostPort(s.ip, s.port) ++} ++ ++// Available returns if slave is in working state at moment based on information in slave flags. ++func (s *Slave) Available() bool { ++ return !strings.Contains(s.flags, "disconnected") && !strings.Contains(s.flags, "s_down") ++} ++ ++// Slaves returns a slice with known slaves of master instance. ++func (s *Sentinel) Slaves() ([]*Slave, error) { ++ res, err := s.doUntilSuccess(func(c redis.Conn) (interface{}, error) { ++ return queryForSlaves(c, s.MasterName) ++ }) ++ if err != nil { ++ return nil, err ++ } ++ return res.([]*Slave), nil ++} ++ ++// SentinelAddrs returns a slice of known Sentinel addresses Sentinel server aware of. ++func (s *Sentinel) SentinelAddrs() ([]string, error) { ++ res, err := s.doUntilSuccess(func(c redis.Conn) (interface{}, error) { ++ return queryForSentinels(c, s.MasterName) ++ }) ++ if err != nil { ++ return nil, err ++ } ++ return res.([]string), nil ++} ++ ++// Discover allows to update list of known Sentinel addresses. From docs: ++// ++// A client may update its internal list of Sentinel nodes following this procedure: ++// 1) Obtain a list of other Sentinels for this master using the command SENTINEL sentinels . ++// 2) Add every ip:port pair not already existing in our list at the end of the list. ++func (s *Sentinel) Discover() error { ++ addrs, err := s.SentinelAddrs() ++ if err != nil { ++ return err ++ } ++ s.mu.Lock() ++ for _, addr := range addrs { ++ if !stringInSlice(addr, s.Addrs) { ++ s.Addrs = append(s.Addrs, addr) ++ } ++ } ++ s.mu.Unlock() ++ return nil ++} ++ ++// Close closes current connection to Sentinel. ++func (s *Sentinel) Close() error { ++ s.mu.Lock() ++ s.close() ++ s.mu.Unlock() ++ return nil ++} ++ ++// TestRole wraps GetRole in a test to verify if the role matches an expected ++// role string. If there was any error in querying the supplied connection, ++// the function returns false. 
Works with Redis >= 2.8.12. ++// It's not goroutine safe, but if you call this method on pooled connections ++// then you are OK. ++func TestRole(c redis.Conn, expectedRole string) bool { ++ role, err := getRole(c) ++ if err != nil || role != expectedRole { ++ return false ++ } ++ return true ++} ++ ++// getRole is a convenience function supplied to query an instance (master or ++// slave) for its role. It attempts to use the ROLE command introduced in ++// redis 2.8.12. ++func getRole(c redis.Conn) (string, error) { ++ res, err := c.Do("ROLE") ++ if err != nil { ++ return "", err ++ } ++ rres, ok := res.([]interface{}) ++ if ok { ++ return redis.String(rres[0], nil) ++ } ++ return "", errors.New("redigo: can not transform ROLE reply to string") ++} ++ ++func queryForMaster(conn redis.Conn, masterName string) (string, error) { ++ res, err := redis.Strings(conn.Do("SENTINEL", "get-master-addr-by-name", masterName)) ++ if err != nil { ++ return "", err ++ } ++ if len(res) < 2 { ++ return "", errors.New("redigo: malformed get-master-addr-by-name reply") ++ } ++ masterAddr := net.JoinHostPort(res[0], res[1]) ++ return masterAddr, nil ++} ++ ++func queryForSlaveAddrs(conn redis.Conn, masterName string) ([]string, error) { ++ slaves, err := queryForSlaves(conn, masterName) ++ if err != nil { ++ return nil, err ++ } ++ slaveAddrs := make([]string, 0) ++ for _, slave := range slaves { ++ slaveAddrs = append(slaveAddrs, slave.Addr()) ++ } ++ return slaveAddrs, nil ++} ++ ++func queryForSlaves(conn redis.Conn, masterName string) ([]*Slave, error) { ++ res, err := redis.Values(conn.Do("SENTINEL", "slaves", masterName)) ++ if err != nil { ++ return nil, err ++ } ++ slaves := make([]*Slave, 0) ++ for _, a := range res { ++ sm, err := redis.StringMap(a, err) ++ if err != nil { ++ return slaves, err ++ } ++ slave := &Slave{ ++ ip: sm["ip"], ++ port: sm["port"], ++ flags: sm["flags"], ++ } ++ slaves = append(slaves, slave) ++ } ++ return slaves, nil ++} ++ ++func queryForSentinels(conn redis.Conn, masterName string) ([]string, error) { ++ res, err := redis.Values(conn.Do("SENTINEL", "sentinels", masterName)) ++ if err != nil { ++ return nil, err ++ } ++ sentinels := make([]string, 0) ++ for _, a := range res { ++ sm, err := redis.StringMap(a, err) ++ if err != nil { ++ return sentinels, err ++ } ++ sentinels = append(sentinels, fmt.Sprintf("%s:%s", sm["ip"], sm["port"])) ++ } ++ return sentinels, nil ++} ++ ++func stringInSlice(str string, slice []string) bool { ++ for _, s := range slice { ++ if s == str { ++ return true ++ } ++ } ++ return false ++} diff --git a/src/chartserver/cache.go b/src/chartserver/cache.go index 8d3bbb607..d6cbe34d5 100644 --- a/src/chartserver/cache.go +++ b/src/chartserver/cache.go @@ -14,13 +14,14 @@ import ( ) const ( - standardExpireTime = 3600 * time.Second - redisENVKey = "_REDIS_URL" - cacheDriverENVKey = "CHART_CACHE_DRIVER" // "memory" or "redis" - cacheDriverMem = "memory" - cacheDriverRedis = "redis" - cacheCollectionName = "helm_chart_cache" - maxTry = 10 + standardExpireTime = 3600 * time.Second + redisENVKey = "_REDIS_URL_CORE" + cacheDriverENVKey = "CHART_CACHE_DRIVER" // "memory" or "redis" + cacheDriverMem = "memory" + cacheDriverRedis = "redis" + cacheDriverRedisSentinel = "redis_sentinel" + cacheCollectionName = "helm_chart_cache" + maxTry = 10 ) // ChartCache is designed to cache some processed data for repeated accessing @@ -181,6 +182,27 @@ func initCacheDriver(cacheConfig *ChartCacheConfig) beego_cache.Cache { return nil } + hlog.Info("Enable redis cache 
for chart caching") + return redisCache + } + case cacheDriverRedisSentinel: + // New with retry + count := 0 + for { + count++ + redisCache, err := beego_cache.NewCache(cacheDriverRedisSentinel, cacheConfig.Config) + if err != nil { + // Just logged + hlog.Errorf("Failed to initialize redis cache: %s", err) + + if count < maxTry { + <-time.After(time.Duration(backoff(count)) * time.Second) + continue + } + + return nil + } + hlog.Info("Enable redis cache for chart caching") return redisCache } diff --git a/src/chartserver/redis_sentinel.go b/src/chartserver/redis_sentinel.go new file mode 100644 index 000000000..a1779f282 --- /dev/null +++ b/src/chartserver/redis_sentinel.go @@ -0,0 +1,250 @@ +package chartserver + +import ( + "encoding/json" + "errors" + "fmt" + "github.com/FZambia/sentinel" + "strconv" + "time" + + "github.com/gomodule/redigo/redis" + + "github.com/astaxie/beego/cache" + "strings" +) + +var ( + // DefaultKey the collection name of redis for cache adapter. + DefaultKey = "beecacheRedis" +) + +// Cache is Redis cache adapter. +type Cache struct { + p *redis.Pool // redis connection pool + conninfo string + dbNum int + key string + password string + maxIdle int + masterName string +} + +// NewRedisCache create new redis cache with default collection name. +func NewRedisCache() cache.Cache { + return &Cache{key: DefaultKey} +} + +// actually do the redis cmds, args[0] must be the key name. +func (rc *Cache) do(commandName string, args ...interface{}) (reply interface{}, err error) { + if len(args) < 1 { + return nil, errors.New("missing required arguments") + } + args[0] = rc.associate(args[0]) + c := rc.p.Get() + defer c.Close() + + return c.Do(commandName, args...) +} + +// associate with config key. +func (rc *Cache) associate(originKey interface{}) string { + return fmt.Sprintf("%s:%s", rc.key, originKey) +} + +// Get cache from redis. +func (rc *Cache) Get(key string) interface{} { + if v, err := rc.do("GET", key); err == nil { + return v + } + return nil +} + +// GetMulti get cache from redis. +func (rc *Cache) GetMulti(keys []string) []interface{} { + c := rc.p.Get() + defer c.Close() + var args []interface{} + for _, key := range keys { + args = append(args, rc.associate(key)) + } + values, err := redis.Values(c.Do("MGET", args...)) + if err != nil { + return nil + } + return values +} + +// Put put cache to redis. +func (rc *Cache) Put(key string, val interface{}, timeout time.Duration) error { + _, err := rc.do("SETEX", key, int64(timeout/time.Second), val) + return err +} + +// Delete delete cache in redis. +func (rc *Cache) Delete(key string) error { + _, err := rc.do("DEL", key) + return err +} + +// IsExist check cache's existence in redis. +func (rc *Cache) IsExist(key string) bool { + v, err := redis.Bool(rc.do("EXISTS", key)) + if err != nil { + return false + } + return v +} + +// Incr increase counter in redis. +func (rc *Cache) Incr(key string) error { + _, err := redis.Bool(rc.do("INCRBY", key, 1)) + return err +} + +// Decr decrease counter in redis. +func (rc *Cache) Decr(key string) error { + _, err := redis.Bool(rc.do("INCRBY", key, -1)) + return err +} + +// ClearAll clean all cache in redis. delete this redis collection. +func (rc *Cache) ClearAll() error { + c := rc.p.Get() + defer c.Close() + cachedKeys, err := redis.Strings(c.Do("KEYS", rc.key+":*")) + if err != nil { + return err + } + for _, str := range cachedKeys { + if _, err = c.Do("DEL", str); err != nil { + return err + } + } + return err +} + +// StartAndGC start redis cache adapter. 
+// config is like {"key":"collection key","conn":"connection info","dbNum":"0","masterName":"mymaster"} +// the cache item in redis are stored forever, +// so no gc operation. +func (rc *Cache) StartAndGC(config string) error { + var cf map[string]string + json.Unmarshal([]byte(config), &cf) + + if _, ok := cf["key"]; !ok { + cf["key"] = DefaultKey + } + if _, ok := cf["masterName"]; !ok { + return errors.New("config has no masterName") + } + if _, ok := cf["conn"]; !ok { + return errors.New("config has no conn key") + } + + // Format redis://@: + cf["conn"] = strings.Replace(cf["conn"], "redis://", "", 1) + cf["conn"] = strings.Replace(cf["conn"], "redis_sentinel://", "", 1) + if i := strings.Index(cf["conn"], "@"); i > -1 { + cf["password"] = cf["conn"][0:i] + cf["conn"] = cf["conn"][i+1:] + } + + if _, ok := cf["dbNum"]; !ok { + cf["dbNum"] = "0" + } + if _, ok := cf["password"]; !ok { + cf["password"] = "" + } + if _, ok := cf["maxIdle"]; !ok { + cf["maxIdle"] = "3" + } + rc.key = cf["key"] + rc.masterName = cf["masterName"] + rc.conninfo = cf["conn"] + rc.dbNum, _ = strconv.Atoi(cf["dbNum"]) + rc.password = cf["password"] + rc.maxIdle, _ = strconv.Atoi(cf["maxIdle"]) + + rc.connectInit() + + c := rc.p.Get() + defer c.Close() + + return c.Err() +} + +// connect to redis. +func (rc *Cache) connectInit() { + dialFunc := func() (c redis.Conn, err error) { + c, err = redis.Dial("tcp", rc.conninfo) + if err != nil { + return nil, err + } + + if rc.password != "" { + if _, err := c.Do("AUTH", rc.password); err != nil { + c.Close() + return nil, err + } + } + + _, selecterr := c.Do("SELECT", rc.dbNum) + if selecterr != nil { + c.Close() + return nil, selecterr + } + return + } + // initialize a new pool + rc.p = &redis.Pool{ + MaxIdle: rc.maxIdle, + IdleTimeout: 180 * time.Second, + Dial: dialFunc, + } + + var sentinelOptions []redis.DialOption + + redisOptions := sentinelOptions + + if rc.password != "" { + redisOptions = append(redisOptions, redis.DialPassword(rc.password)) + } + + redisOptions = append(redisOptions, redis.DialDatabase(rc.dbNum)) + sntnl := &sentinel.Sentinel{ + Addrs: strings.Split(rc.conninfo, ","), + MasterName: rc.masterName, + Dial: func(addr string) (redis.Conn, error) { + fmt.Println("chart dial redis sentinel:", addr) + c, err := redis.Dial("tcp", addr, sentinelOptions...) + if err != nil { + return nil, err + } + return c, nil + }, + } + + rc.p = &redis.Pool{ + Dial: func() (redis.Conn, error) { + masterAddr, err := sntnl.MasterAddr() + if err != nil { + return nil, err + } + fmt.Println("chart dial redis master:", masterAddr, "db:", rc.dbNum) + return redis.Dial("tcp", masterAddr, redisOptions...) 
+ }, + TestOnBorrow: func(c redis.Conn, t time.Time) error { + if !sentinel.TestRole(c, "master") { + return errors.New("role check failed") + } + return nil + }, + MaxIdle: rc.maxIdle, + IdleTimeout: 180 * time.Second, + } +} + +func init() { + cache.Register("redis_sentinel", NewRedisCache) +} diff --git a/src/chartserver/utils.go b/src/chartserver/utils.go index f64148a61..2166f8a3b 100644 --- a/src/chartserver/utils.go +++ b/src/chartserver/utils.go @@ -6,6 +6,7 @@ import ( "fmt" "net/url" "os" + "strconv" "strings" ) @@ -35,62 +36,69 @@ func extractError(content []byte) (text string, err error) { } // Parse the redis configuration to the beego cache pattern -// Config pattern is "address:port[,weight,password,db_index]" -func parseRedisConfig(redisConfigV string) (string, error) { +// redis://:password@host:6379/1 +// redis+sentinel://anonymous:password@host1:26379,host2:26379/mymaster/1 +func parseRedisConfig(redisConfigV string) (map[string]string, error) { if len(redisConfigV) == 0 { - return "", errors.New("empty redis config") + return nil, errors.New("empty redis config") } redisConfig := make(map[string]string) redisConfig["key"] = cacheCollectionName - // Try best to parse the configuration segments. - // If the related parts are missing, assign default value. - // The default database index for UI process is 0. - configSegments := strings.Split(redisConfigV, ",") - for i, segment := range configSegments { - if i > 3 { - // ignore useless segments - break - } - - switch i { - // address:port - case 0: - redisConfig["conn"] = segment - // password, may not exist - case 2: - redisConfig["password"] = segment - // database index, may not exist - case 3: - redisConfig["dbNum"] = segment - } + if strings.Index(redisConfigV, "//") < 0 { + redisConfigV = "redis://" + redisConfigV } - - // Assign default value - if len(redisConfig["dbNum"]) == 0 { - redisConfig["dbNum"] = "0" - } - - // Try to validate the connection address - fullAddr := redisConfig["conn"] - if strings.Index(fullAddr, "://") == -1 { - // Append schema - fullAddr = fmt.Sprintf("redis://%s", fullAddr) - } - // Validate it by url - _, err := url.Parse(fullAddr) + u, err := url.Parse(redisConfigV) if err != nil { - return "", err + return nil, fmt.Errorf("bad _REDIS_URL:%s", redisConfigV) + } + if u.Scheme == "redis+sentinel" { + ps := strings.Split(u.Path, "/") + if len(ps) < 2 { + return nil, fmt.Errorf("bad redis sentinel url: no master name, %s", redisConfigV) + } + if _, err := strconv.Atoi(ps[1]); err == nil { + return nil, fmt.Errorf("bad redis sentinel url: master name should not be a number, %s", redisConfigV) + } + redisConfig["conn"] = u.Host + + if u.User != nil { + password, isSet := u.User.Password() + if isSet { + redisConfig["password"] = password + } + } + if len(ps) > 2 { + if _, err := strconv.Atoi(ps[2]); err != nil { + return nil, fmt.Errorf("bad redis sentinel url: bad db, %s", redisConfigV) + } + redisConfig["dbNum"] = ps[2] + } else { + redisConfig["dbNum"] = "0" + } + redisConfig["masterName"] = ps[1] + } else if u.Scheme == "redis" { + redisConfig["conn"] = u.Host // host + if u.User != nil { + password, isSet := u.User.Password() + if isSet { + redisConfig["password"] = password + } + } + if len(u.Path) > 1 { + if _, err := strconv.Atoi(u.Path[1:]); err != nil { + return nil, fmt.Errorf("bad redis url: bad db, %s", redisConfigV) + } + redisConfig["dbNum"] = u.Path[1:] + } else { + redisConfig["dbNum"] = "0" + } + } else { + return nil, fmt.Errorf("bad redis scheme, %s", redisConfigV) } - // 
Convert config map to string - cfgData, err := json.Marshal(redisConfig) - if err != nil { - return "", err - } - - return string(cfgData), nil + return redisConfig, nil } // What's the cache driver if it is set @@ -121,9 +129,18 @@ func getCacheConfig() (*ChartCacheConfig, error) { if err != nil { return nil, fmt.Errorf("failed to parse redis configurations from '%s' with error: %s", redisCfg, err) } + if _, isSet := redisCfg["masterName"]; isSet { + driver = "redis_sentinel" + } + + // Convert config map to string + cfgData, err := json.Marshal(redisCfg) + if err != nil { + return nil, fmt.Errorf("failed to parse redis configurations from '%s' with error: %s", redisCfg, err) + } return &ChartCacheConfig{ DriverType: driver, - Config: redisCfg, + Config: string(cfgData), }, nil } diff --git a/src/chartserver/utils_test.go b/src/chartserver/utils_test.go index 7fb276e7f..836871dae 100644 --- a/src/chartserver/utils_test.go +++ b/src/chartserver/utils_test.go @@ -3,7 +3,6 @@ package chartserver import ( "encoding/json" "os" - "strings" "testing" ) @@ -17,28 +16,55 @@ func TestParseRedisConfig(t *testing.T) { // Case 2: short pattern, addr:port redisAddr = "redis:6379" - if parsedConnStr, err := parseRedisConfig(redisAddr); err != nil { - t.Fatalf("expect nil error but got non nil one if addr is short pattern: %s\n", parsedConnStr) + if parsedConn, err := parseRedisConfig(redisAddr); err != nil { + t.Fatalf("expect nil error but got non nil one if addr is short pattern: %s\n", parsedConn) } // Case 3: long pattern but miss some parts - redisAddr = "redis:6379,100" - if parsedConnStr, err := parseRedisConfig(redisAddr); err != nil { - t.Fatalf("expect nil error but got non nil one if addr is long pattern with some parts missing: %s\n", parsedConnStr) + redisAddr = "redis:6379?idle_timeout_seconds=100" + if parsedConn, err := parseRedisConfig(redisAddr); err != nil { + t.Fatalf("expect nil error but got non nil one if addr is long pattern with some parts missing: %v\n", parsedConn) } else { - if strings.Index(parsedConnStr, `"dbNum":"0"`) == -1 { - t.Fatalf("expect 'dbNum:0' in the parsed conn str but got nothing: %s\n", parsedConnStr) + if num, ok := parsedConn["dbNum"]; !ok || num != "0" { + t.Fatalf("expect 'dbNum:0' in the parsed conn str: %v\n", parsedConn) } } // Case 4: long pattern - redisAddr = "redis:6379,100,Passw0rd,1" - if parsedConnStr, err := parseRedisConfig(redisAddr); err != nil { + redisAddr = ":Passw0rd@redis:6379/1?idle_timeout_seconds=100" + if parsedConn, err := parseRedisConfig(redisAddr); err != nil { t.Fatal("expect nil error but got non nil one if addr is long pattern") } else { - if strings.Index(parsedConnStr, `"dbNum":"1"`) == -1 || - strings.Index(parsedConnStr, `"password":"Passw0rd"`) == -1 { - t.Fatalf("expect 'dbNum:0' and 'password:Passw0rd' in the parsed conn str but got nothing: %s", parsedConnStr) + if num, ok := parsedConn["dbNum"]; !ok || num != "1" { + t.Fatalf("expect 'dbNum:1' in the parsed conn str: %v", parsedConn) + } + if p, ok := parsedConn["password"]; !ok || p != "Passw0rd" { + t.Fatalf("expect 'password:Passw0rd' in the parsed conn str: %v", parsedConn) + } + } + + // Case 5: sentinel but miss master name + redisAddr = "redis+sentinel://:Passw0rd@redis1:26379,redis2:26379/1?idle_timeout_seconds=100" + if _, err := parseRedisConfig(redisAddr); err == nil { + t.Fatal("expect no master name error but got nil") + } + + // Case 6: sentinel + redisAddr = "redis+sentinel://:Passw0rd@redis1:26379,redis2:26379/mymaster/1?idle_timeout_seconds=100" + if 
parsedConn, err := parseRedisConfig(redisAddr); err != nil { + t.Fatal("expect nil error but got non nil one if addr is long pattern") + } else { + if num, ok := parsedConn["dbNum"]; !ok || num != "1" { + t.Fatalf("expect 'dbNum:0' in the parsed conn str: %v", parsedConn) + } + if p, ok := parsedConn["password"]; !ok || p != "Passw0rd" { + t.Fatalf("expect 'password:Passw0rd' in the parsed conn str: %v", parsedConn) + } + if v, ok := parsedConn["masterName"]; !ok || v != "mymaster" { + t.Fatalf("expect 'masterName:mymaster' in the parsed conn str: %v", parsedConn) + } + if v, ok := parsedConn["conn"]; !ok || v != "redis1:26379,redis2:26379" { + t.Fatalf("expect 'conn:redis1:26379,redis2:26379' in the parsed conn str: %v", parsedConn) } } } @@ -73,7 +99,7 @@ func TestGetCacheConfig(t *testing.T) { } // case 5: redis cache conf - os.Setenv(redisENVKey, "redis:6379,100,Passw0rd,1") + os.Setenv(redisENVKey, ":Passw0rd@redis:6379/1?idle_timeout_seconds=100") redisConf, err := getCacheConfig() if err != nil { t.Fatalf("expect nil error but got non-nil one when parsing valid redis conf") diff --git a/src/common/utils/redis/helper.go b/src/common/utils/redis/helper.go deleted file mode 100644 index 3e90bb5b4..000000000 --- a/src/common/utils/redis/helper.go +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright Project Harbor Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package redis - -import ( - "errors" - "fmt" - "os" - "strconv" - "sync" - "time" - - "github.com/garyburd/redigo/redis" - "github.com/goharbor/harbor/src/common/utils" - "github.com/goharbor/harbor/src/core/config" - "github.com/goharbor/harbor/src/lib/log" -) - -var ( - // ErrUnLock ... - ErrUnLock = errors.New("error to release the redis lock") -) - -const ( - unlockScript = ` -if redis.call("get",KEYS[1]) == ARGV[1] then - return redis.call("del",KEYS[1]) -else - return 0 -end -` -) - -// Mutex ... -type Mutex struct { - Conn redis.Conn - key string - value string - opts Options -} - -// New ... 
-func New(conn redis.Conn, key, value string) *Mutex { - o := *DefaultOptions() - if value == "" { - value = utils.GenerateRandomString() - } - return &Mutex{conn, key, value, o} -} - -// Require retry to require the lock -func (rm *Mutex) Require() (bool, error) { - var isRequired bool - var err error - - for i := 0; i < rm.opts.maxRetry; i++ { - isRequired, err = rm.require() - if isRequired { - break - } - if err != nil || !isRequired { - time.Sleep(rm.opts.retryDelay) - } - } - - return isRequired, err -} - -// require get the redis lock, for details, just refer to https://redis.io/topics/distlock -func (rm *Mutex) require() (bool, error) { - reply, err := redis.String(rm.Conn.Do("SET", rm.key, rm.value, "NX", "PX", int(rm.opts.expiry/time.Millisecond))) - if err != nil { - return false, err - } - return reply == "OK", nil -} - -// Free releases the lock, for details, just refer to https://redis.io/topics/distlock -func (rm *Mutex) Free() (bool, error) { - script := redis.NewScript(1, unlockScript) - resp, err := redis.Int(script.Do(rm.Conn, rm.key, rm.value)) - if err != nil { - return false, err - } - if resp == 0 { - return false, ErrUnLock - } - return true, nil -} - -// Options ... -type Options struct { - retryDelay time.Duration - expiry time.Duration - maxRetry int -} - -var ( - opt *Options - optOnce sync.Once - - defaultDelay = int64(1) // 1 second - defaultMaxRetry = 600 - defaultExpire = int64(2 * time.Hour / time.Second) // 2 hours -) - -// DefaultOptions ... -func DefaultOptions() *Options { - optOnce.Do(func() { - retryDelay, err := strconv.ParseInt(os.Getenv("REDIS_LOCK_RETRY_DELAY"), 10, 64) - if err != nil || retryDelay < 0 { - retryDelay = defaultDelay - } - - maxRetry, err := strconv.Atoi(os.Getenv("REDIS_LOCK_MAX_RETRY")) - if err != nil || maxRetry < 0 { - maxRetry = defaultMaxRetry - } - - expire, err := strconv.ParseInt(os.Getenv("REDIS_LOCK_EXPIRE"), 10, 64) - if err != nil || expire < 0 { - expire = defaultExpire - } - - opt = &Options{ - retryDelay: time.Duration(retryDelay) * time.Second, - expiry: time.Duration(expire) * time.Second, - maxRetry: maxRetry, - } - }) - - return opt -} - -var ( - pool *redis.Pool - poolOnce sync.Once - - poolMaxIdle = 200 - poolMaxActive = 1000 - poolIdleTimeout int64 = 180 -) - -// DefaultPool return default redis pool -func DefaultPool() *redis.Pool { - poolOnce.Do(func() { - maxIdle, err := strconv.Atoi(os.Getenv("REDIS_POOL_MAX_IDLE")) - if err != nil || maxIdle < 0 { - maxIdle = poolMaxIdle - } - - maxActive, err := strconv.Atoi(os.Getenv("REDIS_POOL_MAX_ACTIVE")) - if err != nil || maxActive < 0 { - maxActive = poolMaxActive - } - - idleTimeout, err := strconv.ParseInt(os.Getenv("REDIS_POOL_IDLE_TIMEOUT"), 10, 64) - if err != nil || idleTimeout < 0 { - idleTimeout = poolIdleTimeout - } - - pool = &redis.Pool{ - Dial: func() (redis.Conn, error) { - url := config.GetRedisOfRegURL() - if url == "" { - url = "redis://localhost:6379/1" - } - - return redis.DialURL(url) - }, - TestOnBorrow: func(c redis.Conn, t time.Time) error { - _, err := c.Do("PING") - return err - }, - MaxIdle: maxIdle, - MaxActive: maxActive, - IdleTimeout: time.Duration(idleTimeout) * time.Second, - Wait: true, - } - }) - - return pool -} - -// RequireLock returns lock by key -func RequireLock(key string, conns ...redis.Conn) (*Mutex, error) { - var conn redis.Conn - if len(conns) > 0 { - conn = conns[0] - } else { - conn = DefaultPool().Get() - } - - m := New(conn, key, utils.GenerateRandomString()) - ok, err := m.Require() - if err != nil { - return 
nil, fmt.Errorf("require redis lock failed: %v", err) - } - - if !ok { - return nil, fmt.Errorf("unable to require lock for %s", key) - } - - return m, nil -} - -// FreeLock free lock -func FreeLock(m *Mutex) error { - if _, err := m.Free(); err != nil { - log.Warningf("failed to free lock %s, error: %v", m.key, err) - return err - } - - if err := m.Conn.Close(); err != nil { - log.Warningf("failed to close the redis con for lock %s, error: %v", m.key, err) - return err - } - - return nil -} diff --git a/src/common/utils/redis/helper_test.go b/src/common/utils/redis/helper_test.go deleted file mode 100644 index 71572bc01..000000000 --- a/src/common/utils/redis/helper_test.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright Project Harbor Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package redis - -import ( - "fmt" - "os" - "testing" - "time" - - "github.com/garyburd/redigo/redis" - "github.com/goharbor/harbor/src/common/utils" - "github.com/stretchr/testify/assert" -) - -const testingRedisHost = "REDIS_HOST" - -func init() { - os.Setenv("REDIS_LOCK_MAX_RETRY", "5") -} - -func TestRedisLock(t *testing.T) { - con, err := redis.Dial("tcp", fmt.Sprintf("%s:%d", getRedisHost(), 6379)) - assert.Nil(t, err) - defer con.Close() - - rm := New(con, "test-redis-lock", "test-value") - - successLock, err := rm.Require() - assert.Nil(t, err) - assert.True(t, successLock) - - time.Sleep(2 * time.Second) - _, err = rm.Require() - assert.NotNil(t, err) - - successUnLock, err := rm.Free() - assert.Nil(t, err) - assert.True(t, successUnLock) - -} - -func TestRequireLock(t *testing.T) { - assert := assert.New(t) - - conn, err := redis.Dial("tcp", fmt.Sprintf("%s:%d", getRedisHost(), 6379)) - assert.Nil(err) - defer conn.Close() - - if l, err := RequireLock(utils.GenerateRandomString(), conn); assert.Nil(err) { - l.Free() - } - - if l, err := RequireLock(utils.GenerateRandomString()); assert.Nil(err) { - FreeLock(l) - } - - key := utils.GenerateRandomString() - if l, err := RequireLock(key); assert.Nil(err) { - defer FreeLock(l) - - _, err = RequireLock(key) - assert.Error(err) - } -} - -func TestFreeLock(t *testing.T) { - assert := assert.New(t) - - if l, err := RequireLock(utils.GenerateRandomString()); assert.Nil(err) { - assert.Nil(FreeLock(l)) - } - - conn, err := redis.Dial("tcp", fmt.Sprintf("%s:%d", getRedisHost(), 6379)) - assert.Nil(err) - - if l, err := RequireLock(utils.GenerateRandomString(), conn); assert.Nil(err) { - conn.Close() - assert.Error(FreeLock(l)) - } -} - -func getRedisHost() string { - redisHost := os.Getenv(testingRedisHost) - if redisHost == "" { - redisHost = "127.0.0.1" // for local test - } - - return redisHost -} diff --git a/src/controller/blob/controller.go b/src/controller/blob/controller.go index 2444cc59a..34d5b593b 100644 --- a/src/controller/blob/controller.go +++ b/src/controller/blob/controller.go @@ -17,14 +17,15 @@ package blob import ( "context" "fmt" + "github.com/docker/distribution" - "github.com/garyburd/redigo/redis" - util 
"github.com/goharbor/harbor/src/common/utils/redis" "github.com/goharbor/harbor/src/lib/errors" "github.com/goharbor/harbor/src/lib/log" "github.com/goharbor/harbor/src/lib/orm" + redislib "github.com/goharbor/harbor/src/lib/redis" "github.com/goharbor/harbor/src/pkg/blob" blob_models "github.com/goharbor/harbor/src/pkg/blob/models" + "github.com/gomodule/redigo/redis" ) var ( @@ -290,7 +291,7 @@ func (c *controller) Sync(ctx context.Context, references []distribution.Descrip } func (c *controller) SetAcceptedBlobSize(sessionID string, size int64) error { - conn := util.DefaultPool().Get() + conn := redislib.DefaultPool().Get() defer conn.Close() key := fmt.Sprintf("upload:%s:size", sessionID) @@ -307,7 +308,7 @@ func (c *controller) SetAcceptedBlobSize(sessionID string, size int64) error { } func (c *controller) GetAcceptedBlobSize(sessionID string) (int64, error) { - conn := util.DefaultPool().Get() + conn := redislib.DefaultPool().Get() defer conn.Close() key := fmt.Sprintf("upload:%s:size", sessionID) diff --git a/src/controller/quota/controller.go b/src/controller/quota/controller.go index 6f6e8de17..b63aa262b 100644 --- a/src/controller/quota/controller.go +++ b/src/controller/quota/controller.go @@ -19,15 +19,15 @@ import ( "fmt" "time" - "github.com/garyburd/redigo/redis" - util "github.com/goharbor/harbor/src/common/utils/redis" "github.com/goharbor/harbor/src/lib/errors" "github.com/goharbor/harbor/src/lib/log" "github.com/goharbor/harbor/src/lib/orm" "github.com/goharbor/harbor/src/lib/q" + redislib "github.com/goharbor/harbor/src/lib/redis" "github.com/goharbor/harbor/src/pkg/quota" "github.com/goharbor/harbor/src/pkg/quota/driver" "github.com/goharbor/harbor/src/pkg/quota/types" + "github.com/gomodule/redigo/redis" // quota driver _ "github.com/goharbor/harbor/src/controller/quota/driver" @@ -127,7 +127,7 @@ func (c *controller) List(ctx context.Context, query *q.Query) ([]*quota.Quota, } func (c *controller) getReservedResources(ctx context.Context, reference, referenceID string) (types.ResourceList, error) { - conn := util.DefaultPool().Get() + conn := redislib.DefaultPool().Get() defer conn.Close() key := reservedResourcesKey(reference, referenceID) @@ -143,7 +143,7 @@ func (c *controller) getReservedResources(ctx context.Context, reference, refere } func (c *controller) setReservedResources(ctx context.Context, reference, referenceID string, resources types.ResourceList) error { - conn := util.DefaultPool().Get() + conn := redislib.DefaultPool().Get() defer conn.Close() key := reservedResourcesKey(reference, referenceID) diff --git a/src/core/api/health.go b/src/core/api/health.go index 073c351e7..2e19bc950 100644 --- a/src/core/api/health.go +++ b/src/core/api/health.go @@ -28,7 +28,7 @@ import ( "github.com/goharbor/harbor/src/common/utils" "github.com/goharbor/harbor/src/core/config" "github.com/goharbor/harbor/src/lib/log" - "github.com/gomodule/redigo/redis" + "github.com/goharbor/harbor/src/lib/redis" ) var ( @@ -260,22 +260,10 @@ func databaseHealthChecker() health.Checker { } func redisHealthChecker() health.Checker { - url := config.GetRedisOfRegURL() - timeout := 60 * time.Second period := 10 * time.Second checker := health.CheckFunc(func() error { - conn, err := redis.DialURL(url, - redis.DialConnectTimeout(timeout*time.Second), - redis.DialReadTimeout(timeout*time.Second), - redis.DialWriteTimeout(timeout*time.Second)) - if err != nil { - return fmt.Errorf("failed to establish connection with Redis: %v", err) - } + conn := redis.DefaultPool().Get() defer 
conn.Close() - _, err = conn.Do("PING") - if err != nil { - return fmt.Errorf("failed to run \"PING\": %v", err) - } return nil }) return PeriodicHealthChecker(checker, period) diff --git a/src/core/main.go b/src/core/main.go index 36c49698b..9eed96be2 100755 --- a/src/core/main.go +++ b/src/core/main.go @@ -17,14 +17,17 @@ package main import ( "encoding/gob" "fmt" + "net/url" "os" "os/signal" + "strconv" + "strings" "syscall" "time" "github.com/astaxie/beego" _ "github.com/astaxie/beego/session/redis" - + _ "github.com/astaxie/beego/session/redis_sentinel" "github.com/goharbor/harbor/src/common/dao" common_http "github.com/goharbor/harbor/src/common/http" "github.com/goharbor/harbor/src/common/models" @@ -99,11 +102,61 @@ func main() { beego.BConfig.WebConfig.Session.SessionOn = true beego.BConfig.WebConfig.Session.SessionName = config.SessionCookieName - redisURL := os.Getenv("_REDIS_URL") + redisURL := os.Getenv("_REDIS_URL_CORE") if len(redisURL) > 0 { + u, err := url.Parse(redisURL) + if err != nil { + panic("bad _REDIS_URL:" + redisURL) + } gob.Register(models.User{}) - beego.BConfig.WebConfig.Session.SessionProvider = "redis" - beego.BConfig.WebConfig.Session.SessionProviderConfig = redisURL + if u.Scheme == "redis+sentinel" { + ps := strings.Split(u.Path, "/") + if len(ps) < 2 { + panic("bad redis sentinel url: no master name") + } + ss := make([]string, 5) + ss[0] = strings.Join(strings.Split(u.Host, ","), ";") // host + ss[1] = "100" // pool + if u.User != nil { + password, isSet := u.User.Password() + if isSet { + ss[2] = password + } + } + if len(ps) > 2 { + db, err := strconv.Atoi(ps[2]) + if err != nil { + panic("bad redis sentinel url: bad db") + } + if db != 0 { + ss[3] = ps[2] + } + } + ss[4] = ps[1] // monitor name + + beego.BConfig.WebConfig.Session.SessionProvider = "redis_sentinel" + beego.BConfig.WebConfig.Session.SessionProviderConfig = strings.Join(ss, ",") + } else { + ss := make([]string, 5) + ss[0] = u.Host // host + ss[1] = "100" // pool + if u.User != nil { + password, isSet := u.User.Password() + if isSet { + ss[2] = password + } + } + if len(u.Path) > 1 { + if _, err := strconv.Atoi(u.Path[1:]); err != nil { + panic("bad redis url: bad db") + } + ss[3] = u.Path[1:] + } + ss[4] = u.Query().Get("idle_timeout_seconds") + + beego.BConfig.WebConfig.Session.SessionProvider = "redis" + beego.BConfig.WebConfig.Session.SessionProviderConfig = strings.Join(ss, ",") + } } beego.AddTemplateExt("htm") diff --git a/src/go.mod b/src/go.mod index eaf05cd3b..6ddb38535 100644 --- a/src/go.mod +++ b/src/go.mod @@ -6,6 +6,7 @@ require ( github.com/Azure/azure-sdk-for-go v37.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest v0.9.3 // indirect github.com/Azure/go-autorest/autorest/to v0.3.0 // indirect + github.com/FZambia/sentinel v1.1.0 github.com/Masterminds/semver v1.4.2 github.com/Unknwon/goconfig v0.0.0-20160216183935-5f601ca6ef4d // indirect github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 // indirect @@ -27,7 +28,6 @@ require ( github.com/docker/go v0.0.0-20160303222718-d30aec9fd63c // indirect github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82 // indirect github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 - github.com/garyburd/redigo v1.6.0 github.com/ghodss/yaml v1.0.0 github.com/go-openapi/errors v0.19.2 github.com/go-openapi/loads v0.19.4 diff --git a/src/go.sum b/src/go.sum index 44d375ad1..6b875fd35 100644 --- a/src/go.sum +++ b/src/go.sum @@ -51,6 +51,8 @@ github.com/BurntSushi/toml v0.3.1 
h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/ClickHouse/clickhouse-go v1.3.12/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= +github.com/FZambia/sentinel v1.1.0 h1:qrCBfxc8SvJihYNjBWgwUI93ZCvFe/PJIPTHKmlp8a8= +github.com/FZambia/sentinel v1.1.0/go.mod h1:ytL1Am/RLlAoAXG6Kj5LNuw/TRRQrv2rt2FT26vP5gI= github.com/Knetic/govaluate v3.0.0+incompatible h1:7o6+MAPhYTCF0+fdvoz1xDedhRb4f6s9Tn1Tt7/WTEg= github.com/Knetic/govaluate v3.0.0+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= @@ -253,8 +255,6 @@ github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsouza/fake-gcs-server v1.17.0/go.mod h1:D1rTE4YCyHFNa99oyJJ5HyclvN/0uQR+pM/VdlL83bw= github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= -github.com/garyburd/redigo v1.6.0 h1:0VruCpn7yAIIu7pWVClQC8wxCJEcG3nyzpMSHKi1PQc= -github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -325,6 +325,7 @@ github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2K github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= github.com/go-openapi/validate v0.19.5 h1:QhCBKRYqZR+SKo4gl1lPhPahope8/RLt6EVgY8X80w0= github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= +github.com/go-redis/redis v6.14.2+incompatible h1:UE9pLhzmWf+xHNmZsoccjXosPicuiNaInPgym8nzfg0= github.com/go-redis/redis v6.14.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= @@ -444,6 +445,7 @@ github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -582,9 +584,11 @@ github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXW github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -994,6 +998,7 @@ gopkg.in/dancannon/gorethink.v3 v3.0.5/go.mod h1:GXsi1e3N2OcKhcP6nsYABTiUejbWMFO gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fatih/pool.v2 v2.0.0 h1:xIFeWtxifuQJGk/IEPKsTduEKcKvPmhoiVDGpC40nKg= gopkg.in/fatih/pool.v2 v2.0.0/go.mod h1:8xVGeu1/2jr2wm5V9SPuMht2H5AEmf5aFMGSQixtjTY= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/gorethink/gorethink.v3 v3.0.5 h1:e2Uc/Xe+hpcVQFsj6MuHlYog3r0JYpnTzwDj/y2O4MU= @@ -1010,6 +1015,7 @@ gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.3.0 h1:nLzhkFyl5bkblqYBoiWJUt5JkWOzmiaBtCxdJAqJd3U= gopkg.in/square/go-jose.v2 v2.3.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/src/jobservice/common/utils/utils.go b/src/jobservice/common/utils/utils.go index 3ee5bfde7..b406d29f7 100644 --- a/src/jobservice/common/utils/utils.go +++ b/src/jobservice/common/utils/utils.go @@ -25,7 +25,6 @@ import ( "net" "net/url" "os" - "strconv" "strings" ) @@ -106,40 +105,6 @@ func IsValidURL(address string) bool { return true } -// TranslateRedisAddress translates the comma format to redis URL -func TranslateRedisAddress(commaFormat string) (string, bool) { - if IsEmptyStr(commaFormat) { - return "", false - } - - sections := strings.Split(commaFormat, ",") - totalSections := len(sections) - if totalSections == 0 { - return "", false - } - - urlParts := make([]string, 0) - // section[0] should be host:port - redisURL := fmt.Sprintf("redis://%s", sections[0]) - if _, err := url.Parse(redisURL); err != nil { - return "", false - } - urlParts = append(urlParts, "redis://", sections[0]) - // Ignore weight - // Check password - if totalSections >= 3 && !IsEmptyStr(sections[2]) { - urlParts = []string{urlParts[0], fmt.Sprintf("%s:%s@", "arbitrary_username", sections[2]), urlParts[1]} - } - - if totalSections >= 4 && !IsEmptyStr(sections[3]) { - if _, err := strconv.Atoi(sections[3]); err == nil { - urlParts = append(urlParts, "/", 
sections[3]) - } - } - - return strings.Join(urlParts, ""), true -} - // SerializeJob encodes work.Job to json data. func SerializeJob(job *work.Job) ([]byte, error) { return json.Marshal(job) diff --git a/src/jobservice/config/config.go b/src/jobservice/config/config.go index c0d3055d1..877b38ce3 100644 --- a/src/jobservice/config/config.go +++ b/src/jobservice/config/config.go @@ -150,13 +150,10 @@ func (c *Configuration) Load(yamlFilePath string, detectEnv bool) error { redisAddress := c.PoolConfig.RedisPoolCfg.RedisURL if !utils.IsEmptyStr(redisAddress) { if _, err := url.Parse(redisAddress); err != nil { - if redisURL, ok := utils.TranslateRedisAddress(redisAddress); ok { - c.PoolConfig.RedisPoolCfg.RedisURL = redisURL - } - } else { - if !strings.HasPrefix(redisAddress, redisSchema) { - c.PoolConfig.RedisPoolCfg.RedisURL = fmt.Sprintf("%s%s", redisSchema, redisAddress) - } + return fmt.Errorf("bad redis url for jobservice, %s", redisAddress) + } + if !strings.Contains(redisAddress, "://") { + c.PoolConfig.RedisPoolCfg.RedisURL = fmt.Sprintf("%s%s", redisSchema, redisAddress) } } } @@ -313,8 +310,7 @@ func (c *Configuration) validate() error { if utils.IsEmptyStr(c.PoolConfig.RedisPoolCfg.RedisURL) { return errors.New("URL of redis worker is empty") } - - if !strings.HasPrefix(c.PoolConfig.RedisPoolCfg.RedisURL, redisSchema) { + if !strings.Contains(c.PoolConfig.RedisPoolCfg.RedisURL, "://") { return errors.New("invalid redis URL") } diff --git a/src/jobservice/config/config_test.go b/src/jobservice/config/config_test.go index d6676b2e6..6925b123d 100644 --- a/src/jobservice/config/config_test.go +++ b/src/jobservice/config/config_test.go @@ -71,7 +71,7 @@ func (suite *ConfigurationTestSuite) TestConfigLoadingWithEnv() { ) assert.Equal( suite.T(), - "redis://arbitrary_username:password@8.8.8.8:6379/0", + "redis://:password@8.8.8.8:6379/2", cfg.PoolConfig.RedisPoolCfg.RedisURL, "expect redis URL 'localhost' but got '%s'", cfg.PoolConfig.RedisPoolCfg.RedisURL, @@ -132,7 +132,7 @@ func setENV() error { err = os.Setenv("JOB_SERVICE_HTTPS_KEY", "../server.key") err = os.Setenv("JOB_SERVICE_POOL_BACKEND", "redis") err = os.Setenv("JOB_SERVICE_POOL_WORKERS", "8") - err = os.Setenv("JOB_SERVICE_POOL_REDIS_URL", "8.8.8.8:6379,100,password,0") + err = os.Setenv("JOB_SERVICE_POOL_REDIS_URL", "redis://:password@8.8.8.8:6379/2") err = os.Setenv("JOB_SERVICE_POOL_REDIS_NAMESPACE", "ut_namespace") err = os.Setenv("JOBSERVICE_SECRET", "js_secret") err = os.Setenv("CORE_SECRET", "core_secret") diff --git a/src/jobservice/config_test.yml b/src/jobservice/config_test.yml index 984b60243..c1f2ce433 100644 --- a/src/jobservice/config_test.yml +++ b/src/jobservice/config_test.yml @@ -18,7 +18,7 @@ worker_pool: #Additional config if use 'redis' backend redis_pool: #redis://[arbitrary_username:password@]ipaddress:port/database_index - #or ipaddress:port[,weight,password,database_index] + #or ipaddress:port[|weight|password|database_index] redis_url: "localhost:6379" namespace: "testing_job_service_v2" diff --git a/src/jobservice/job/impl/gc/garbage_collection.go b/src/jobservice/job/impl/gc/garbage_collection.go index db954f4bb..74286e027 100644 --- a/src/jobservice/job/impl/gc/garbage_collection.go +++ b/src/jobservice/job/impl/gc/garbage_collection.go @@ -15,24 +15,24 @@ package gc import ( - "github.com/goharbor/harbor/src/lib/errors" - "github.com/goharbor/harbor/src/pkg/artifactrash/model" - blob_models "github.com/goharbor/harbor/src/pkg/blob/models" "os" "strconv" "time" + 
"github.com/goharbor/harbor/src/lib/errors" + redislib "github.com/goharbor/harbor/src/lib/redis" + "github.com/goharbor/harbor/src/pkg/artifactrash/model" + blob_models "github.com/goharbor/harbor/src/pkg/blob/models" + "github.com/goharbor/harbor/src/common/models" + "github.com/goharbor/harbor/src/common/registryctl" "github.com/goharbor/harbor/src/controller/artifact" "github.com/goharbor/harbor/src/controller/project" + "github.com/goharbor/harbor/src/jobservice/job" + "github.com/goharbor/harbor/src/jobservice/logger" "github.com/goharbor/harbor/src/lib/q" "github.com/goharbor/harbor/src/pkg/artifactrash" "github.com/goharbor/harbor/src/pkg/blob" - - "github.com/garyburd/redigo/redis" - "github.com/goharbor/harbor/src/common/registryctl" - "github.com/goharbor/harbor/src/jobservice/job" - "github.com/goharbor/harbor/src/jobservice/logger" "github.com/goharbor/harbor/src/registryctl/client" ) @@ -302,17 +302,19 @@ func (gc *GarbageCollector) sweep(ctx job.Context) error { // cleanCache is to clean the registry cache for GC. // To do this is because the issue https://github.com/docker/distribution/issues/2094 func (gc *GarbageCollector) cleanCache() error { - con, err := redis.DialURL( - gc.redisURL, - redis.DialConnectTimeout(dialConnectionTimeout), - redis.DialReadTimeout(dialReadTimeout), - redis.DialWriteTimeout(dialWriteTimeout), - ) - + pool, err := redislib.GetRedisPool("GarbageCollector", gc.redisURL, &redislib.PoolParam{ + PoolMaxIdle: 0, + PoolMaxActive: 1, + PoolIdleTimeout: 60 * time.Second, + DialConnectionTimeout: dialConnectionTimeout, + DialReadTimeout: dialReadTimeout, + DialWriteTimeout: dialWriteTimeout, + }) if err != nil { gc.logger.Errorf("failed to connect to redis %v", err) return err } + con := pool.Get() defer con.Close() // clean all keys in registry redis DB. diff --git a/src/jobservice/job/impl/gc/util.go b/src/jobservice/job/impl/gc/util.go index 5c15bf28f..f021df2f5 100644 --- a/src/jobservice/job/impl/gc/util.go +++ b/src/jobservice/job/impl/gc/util.go @@ -2,9 +2,9 @@ package gc import ( "fmt" - "github.com/garyburd/redigo/redis" "github.com/goharbor/harbor/src/lib/errors" "github.com/goharbor/harbor/src/pkg/registry" + "github.com/gomodule/redigo/redis" ) // delKeys ... 
diff --git a/src/jobservice/runtime/bootstrap.go b/src/jobservice/runtime/bootstrap.go index 8f7b7ba64..08d258f4a 100644 --- a/src/jobservice/runtime/bootstrap.go +++ b/src/jobservice/runtime/bootstrap.go @@ -17,14 +17,13 @@ package runtime import ( "context" "fmt" + redislib "github.com/goharbor/harbor/src/lib/redis" "os" "os/signal" "sync" "syscall" "time" - "github.com/goharbor/harbor/src/pkg/p2p/preheat" - "github.com/goharbor/harbor/src/jobservice/api" "github.com/goharbor/harbor/src/jobservice/common/utils" "github.com/goharbor/harbor/src/jobservice/config" @@ -44,6 +43,7 @@ import ( "github.com/goharbor/harbor/src/jobservice/worker" "github.com/goharbor/harbor/src/jobservice/worker/cworker" "github.com/goharbor/harbor/src/lib/errors" + "github.com/goharbor/harbor/src/pkg/p2p/preheat" "github.com/goharbor/harbor/src/pkg/retention" sc "github.com/goharbor/harbor/src/pkg/scan" "github.com/goharbor/harbor/src/pkg/scan/all" @@ -53,8 +53,7 @@ import ( const ( dialConnectionTimeout = 30 * time.Second - healthCheckPeriod = time.Minute - dialReadTimeout = healthCheckPeriod + 10*time.Second + dialReadTimeout = 10 * time.Second dialWriteTimeout = 10 * time.Second ) @@ -279,25 +278,15 @@ func (bs *Bootstrap) loadAndRunRedisWorkerPool( // Get a redis connection pool func (bs *Bootstrap) getRedisPool(redisPoolConfig *config.RedisPoolConfig) *redis.Pool { - return &redis.Pool{ - MaxIdle: 6, - Wait: true, - IdleTimeout: time.Duration(redisPoolConfig.IdleTimeoutSecond) * time.Second, - Dial: func() (redis.Conn, error) { - return redis.DialURL( - redisPoolConfig.RedisURL, - redis.DialConnectTimeout(dialConnectionTimeout), - redis.DialReadTimeout(dialReadTimeout), - redis.DialWriteTimeout(dialWriteTimeout), - ) - }, - TestOnBorrow: func(c redis.Conn, t time.Time) error { - if time.Since(t) < time.Minute { - return nil - } - - _, err := c.Do("PING") - return err - }, + if pool, err := redislib.GetRedisPool("JobService", redisPoolConfig.RedisURL, &redislib.PoolParam{ + PoolMaxIdle: 6, + PoolIdleTimeout: time.Duration(redisPoolConfig.IdleTimeoutSecond) * time.Second, + DialConnectionTimeout: dialConnectionTimeout, + DialReadTimeout: dialReadTimeout, + DialWriteTimeout: dialWriteTimeout, + }); err != nil { + panic(err) + } else { + return pool } } diff --git a/src/jobservice/tests/utils.go b/src/jobservice/tests/utils.go index d5e2d0811..abd942c52 100644 --- a/src/jobservice/tests/utils.go +++ b/src/jobservice/tests/utils.go @@ -18,6 +18,7 @@ package tests import ( "errors" "fmt" + redislib "github.com/goharbor/harbor/src/lib/redis" "os" "time" @@ -36,22 +37,14 @@ const ( // GiveMeRedisPool ... func GiveMeRedisPool() *redis.Pool { redisHost := getRedisHost() - redisPool := &redis.Pool{ - MaxActive: 6, - MaxIdle: 6, - Wait: true, - Dial: func() (redis.Conn, error) { - return redis.Dial( - "tcp", - fmt.Sprintf("%s:%d", redisHost, 6379), - redis.DialConnectTimeout(dialConnectionTimeout), - redis.DialReadTimeout(dialReadTimeout), - redis.DialWriteTimeout(dialWriteTimeout), - ) - }, - } - - return redisPool + pool, _ := redislib.GetRedisPool("test", fmt.Sprintf("redis://%s:%d", redisHost, 6379), &redislib.PoolParam{ + PoolMaxIdle: 6, + PoolMaxActive: 6, + DialConnectionTimeout: dialConnectionTimeout, + DialReadTimeout: dialReadTimeout, + DialWriteTimeout: dialWriteTimeout, + }) + return pool } // GiveMeTestNamespace ... 
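
[Editor's note, not part of this patch: a brief sketch of how core components can use the DefaultPool helper introduced in the next file (src/lib/redis/helper.go). The key and value below are hypothetical; only the Get / defer Close / Do pattern mirrors the refactored blob and quota controllers.]

	package example

	import (
		redislib "github.com/goharbor/harbor/src/lib/redis"
		"github.com/gomodule/redigo/redis"
	)

	func setAndGetExample() (string, error) {
		// DefaultPool is created lazily from the registry redis URL (or localhost as a fallback).
		conn := redislib.DefaultPool().Get()
		defer conn.Close()

		// Store and read back an illustrative key with a one-hour TTL.
		if _, err := conn.Do("SETEX", "example:key", 3600, "value"); err != nil {
			return "", err
		}
		return redis.String(conn.Do("GET", "example:key"))
	}
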
diff --git a/src/lib/redis/helper.go b/src/lib/redis/helper.go new file mode 100644 index 000000000..858e2e3ae --- /dev/null +++ b/src/lib/redis/helper.go @@ -0,0 +1,72 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package redis + +import ( + "os" + "strconv" + "sync" + "time" + + "github.com/goharbor/harbor/src/core/config" + "github.com/gomodule/redigo/redis" +) + +var ( + pool *redis.Pool + poolOnce sync.Once + + poolMaxIdle = 200 + poolMaxActive = 1000 + poolIdleTimeout int64 = 180 + dialConnectionTimeout = 30 * time.Second + dialReadTimeout = 10 * time.Second + dialWriteTimeout = 10 * time.Second +) + +// DefaultPool return default redis pool +func DefaultPool() *redis.Pool { + poolOnce.Do(func() { + maxIdle, err := strconv.Atoi(os.Getenv("REDIS_POOL_MAX_IDLE")) + if err != nil || maxIdle < 0 { + maxIdle = poolMaxIdle + } + + maxActive, err := strconv.Atoi(os.Getenv("REDIS_POOL_MAX_ACTIVE")) + if err != nil || maxActive < 0 { + maxActive = poolMaxActive + } + + idleTimeout, err := strconv.ParseInt(os.Getenv("REDIS_POOL_IDLE_TIMEOUT"), 10, 64) + if err != nil || idleTimeout < 0 { + idleTimeout = poolIdleTimeout + } + + url := config.GetRedisOfRegURL() + if url == "" { + url = "redis://localhost:6379/1" + } + pool, err = GetRedisPool("CommonRedis", url, &PoolParam{ + PoolMaxIdle: maxIdle, + PoolMaxActive: maxActive, + PoolIdleTimeout: time.Duration(idleTimeout) * time.Second, + DialConnectionTimeout: dialConnectionTimeout, + DialReadTimeout: dialReadTimeout, + DialWriteTimeout: dialWriteTimeout, + }) + }) + + return pool +} diff --git a/src/lib/redis/helper_test.go b/src/lib/redis/helper_test.go new file mode 100644 index 000000000..1527376cd --- /dev/null +++ b/src/lib/redis/helper_test.go @@ -0,0 +1,40 @@ +// Copyright Project Harbor Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package redis + +import ( + "fmt" + "github.com/stretchr/testify/require" + "os" + "testing" +) + +const testingRedisHost = "REDIS_HOST" + +func TestGetRedisPool(t *testing.T) { + pool, err := GetRedisPool("test", fmt.Sprintf("redis://%s:%d", getRedisHost(), 6379), nil) + require.Nil(t, err) + conn := pool.Get() + defer conn.Close() +} + +func getRedisHost() string { + redisHost := os.Getenv(testingRedisHost) + if redisHost == "" { + redisHost = "127.0.0.1" // for local test + } + + return redisHost +} diff --git a/src/lib/redis/redisclient.go b/src/lib/redis/redisclient.go new file mode 100644 index 000000000..9282898ac --- /dev/null +++ b/src/lib/redis/redisclient.go @@ -0,0 +1,172 @@ +package redis + +import ( + "fmt" + "github.com/goharbor/harbor/src/lib/log" + "net/url" + "strconv" + "strings" + "sync" + "time" + + "github.com/FZambia/sentinel" + "github.com/gomodule/redigo/redis" +) + +var knownPool sync.Map +var m sync.Mutex + +// PoolParam ... +type PoolParam struct { + PoolMaxIdle int + PoolMaxActive int + PoolIdleTimeout time.Duration + + DialConnectionTimeout time.Duration + DialReadTimeout time.Duration + DialWriteTimeout time.Duration +} + +// GetRedisPool get a named redis pool +// supported rawurl +// redis://user:pass@redis_host:port/db +// redis+sentinel://user:pass@redis_sentinel1:port1,redis_sentinel2:port2/monitor_name/db?idle_timeout_seconds=100 +func GetRedisPool(name string, rawurl string, param *PoolParam) (*redis.Pool, error) { + if p, ok := knownPool.Load(name); ok { + return p.(*redis.Pool), nil + } + m.Lock() + defer m.Unlock() + // load again in case multi threads + if p, ok := knownPool.Load(name); ok { + return p.(*redis.Pool), nil + } + + u, err := url.Parse(rawurl) + if err != nil { + return nil, fmt.Errorf("bad redis url: %s, %s, %s", name, rawurl, err) + } + + if param == nil { + param = &PoolParam{ + PoolMaxIdle: 0, + PoolMaxActive: 1, + PoolIdleTimeout: time.Minute, + DialConnectionTimeout: time.Second, + DialReadTimeout: time.Second, + DialWriteTimeout: time.Second, + } + } + if t := u.Query().Get("idle_timeout_seconds"); t != "" { + if tt, e := strconv.Atoi(t); e == nil { + param.PoolIdleTimeout = time.Second * time.Duration(tt) + } + } + + log.Debug("get redis pool:", name, rawurl) + if u.Scheme == "redis" { + pool := &redis.Pool{ + Dial: func() (redis.Conn, error) { + return redis.DialURL(rawurl) + }, + TestOnBorrow: func(c redis.Conn, t time.Time) error { + _, err := c.Do("PING") + return err + }, + MaxIdle: param.PoolMaxIdle, + MaxActive: param.PoolMaxActive, + IdleTimeout: param.PoolIdleTimeout, + Wait: true, + } + knownPool.Store(name, pool) + return pool, nil + } else if u.Scheme == "redis+sentinel" { + pool, err := getSentinelPool(u, param, err, name) + if err != nil { + return nil, err + } + knownPool.Store(name, pool) + return pool, nil + } else { + return nil, fmt.Errorf("bad redis url: not support scheme %s", u.Scheme) + } +} + +func getSentinelPool(u *url.URL, param *PoolParam, err error, name string) (*redis.Pool, error) { + ps := strings.Split(u.Path, "/") + if len(ps) < 2 { + return nil, fmt.Errorf("bad redis sentinel url: no master name, %s %s", name, u) + } + + log.Debug("getSentinelPool:", u) + var sentinelOptions []redis.DialOption + if param.DialConnectionTimeout > 0 { + log.Debug(name, "sentinel DialConnectionTimeout:", param.DialConnectionTimeout) + sentinelOptions = append(sentinelOptions, redis.DialConnectTimeout(param.DialConnectionTimeout)) + } + if param.DialReadTimeout > 0 { + log.Debug(name, "sentinel 
DialReadTimeout:", param.DialReadTimeout) + sentinelOptions = append(sentinelOptions, redis.DialReadTimeout(param.DialReadTimeout)) + } + if param.DialWriteTimeout > 0 { + log.Debug(name, "sentinel DialWriteTimeout:", param.DialWriteTimeout) + sentinelOptions = append(sentinelOptions, redis.DialWriteTimeout(param.DialWriteTimeout)) + } + + redisOptions := sentinelOptions + + if u.User != nil { + password, isSet := u.User.Password() + if isSet { + log.Debug(name, "redis has password") + redisOptions = append(redisOptions, redis.DialPassword(password)) + } + } + + // sentinel doesn't need select db + db := 0 + if len(ps) > 2 { + db, err = strconv.Atoi(ps[2]) + if err != nil { + return nil, fmt.Errorf("invalid redis db: %s, %s", ps[1], name) + } + if db != 0 { + redisOptions = append(redisOptions, redis.DialDatabase(db)) + } + } + + sntnl := &sentinel.Sentinel{ + Addrs: strings.Split(u.Host, ","), + MasterName: ps[1], + Dial: func(addr string) (redis.Conn, error) { + log.Debug(name, "dial redis sentinel:", addr) + c, err := redis.Dial("tcp", addr, sentinelOptions...) + if err != nil { + return nil, err + } + return c, nil + }, + } + + pool := &redis.Pool{ + Dial: func() (redis.Conn, error) { + masterAddr, err := sntnl.MasterAddr() + if err != nil { + return nil, err + } + log.Debug(name, "dial redis master:", masterAddr, "db:", db) + return redis.Dial("tcp", masterAddr, redisOptions...) + }, + TestOnBorrow: func(c redis.Conn, t time.Time) error { + if !sentinel.TestRole(c, "master") { + return fmt.Errorf("check role failed, %s", name) + } + return nil + }, + MaxIdle: param.PoolMaxIdle, + MaxActive: param.PoolMaxActive, + IdleTimeout: param.PoolIdleTimeout, + Wait: true, + } + return pool, nil +} diff --git a/src/vendor/github.com/FZambia/sentinel/.gitignore b/src/vendor/github.com/FZambia/sentinel/.gitignore new file mode 100644 index 000000000..daf913b1b --- /dev/null +++ b/src/vendor/github.com/FZambia/sentinel/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/src/vendor/github.com/garyburd/redigo/LICENSE b/src/vendor/github.com/FZambia/sentinel/LICENSE similarity index 89% rename from src/vendor/github.com/garyburd/redigo/LICENSE rename to src/vendor/github.com/FZambia/sentinel/LICENSE index 67db85882..8dada3eda 100644 --- a/src/vendor/github.com/garyburd/redigo/LICENSE +++ b/src/vendor/github.com/FZambia/sentinel/LICENSE @@ -1,4 +1,3 @@ - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -173,3 +172,30 @@ defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/src/vendor/github.com/FZambia/sentinel/README.md b/src/vendor/github.com/FZambia/sentinel/README.md new file mode 100644 index 000000000..f83afc076 --- /dev/null +++ b/src/vendor/github.com/FZambia/sentinel/README.md @@ -0,0 +1,39 @@ +go-sentinel +=========== + +Redis Sentinel support for [redigo](https://github.com/gomodule/redigo) library. + +Documentation +------------- + +- [API Reference](http://godoc.org/github.com/FZambia/sentinel) + +Alternative solution +-------------------- + +You can alternatively configure Haproxy between your application and Redis to proxy requests to Redis master instance if you only need HA: + +``` +listen redis + server redis-01 127.0.0.1:6380 check port 6380 check inter 2s weight 1 inter 2s downinter 5s rise 10 fall 2 on-marked-down shutdown-sessions on-marked-up shutdown-backup-sessions + server redis-02 127.0.0.1:6381 check port 6381 check inter 2s weight 1 inter 2s downinter 5s rise 10 fall 2 backup + bind *:6379 + mode tcp + option tcpka + option tcplog + option tcp-check + tcp-check send PING\r\n + tcp-check expect string +PONG + tcp-check send info\ replication\r\n + tcp-check expect string role:master + tcp-check send QUIT\r\n + tcp-check expect string +OK + balance roundrobin +``` + +This way you don't need to use this library. + +License +------- + +Library is available under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.html). diff --git a/src/vendor/github.com/FZambia/sentinel/sentinel.go b/src/vendor/github.com/FZambia/sentinel/sentinel.go new file mode 100644 index 000000000..8dd35a3c3 --- /dev/null +++ b/src/vendor/github.com/FZambia/sentinel/sentinel.go @@ -0,0 +1,421 @@ +package sentinel + +import ( + "errors" + "fmt" + "net" + "strings" + "sync" + "time" + + "github.com/gomodule/redigo/redis" +) + +// Sentinel provides a way to add high availability (HA) to Redis Pool using +// preconfigured addresses of Sentinel servers and name of master which Sentinels +// monitor. It works with Redis >= 2.8.12 (mostly because of ROLE command that +// was introduced in that version, it's possible though to support old versions +// using INFO command). 
+// +// Example of the simplest usage to contact master "mymaster": +// +// func newSentinelPool() *redis.Pool { +// sntnl := &sentinel.Sentinel{ +// Addrs: []string{":26379", ":26380", ":26381"}, +// MasterName: "mymaster", +// Dial: func(addr string) (redis.Conn, error) { +// timeout := 500 * time.Millisecond +// c, err := redis.DialTimeout("tcp", addr, timeout, timeout, timeout) +// if err != nil { +// return nil, err +// } +// return c, nil +// }, +// } +// return &redis.Pool{ +// MaxIdle: 3, +// MaxActive: 64, +// Wait: true, +// IdleTimeout: 240 * time.Second, +// Dial: func() (redis.Conn, error) { +// masterAddr, err := sntnl.MasterAddr() +// if err != nil { +// return nil, err +// } +// c, err := redis.Dial("tcp", masterAddr) +// if err != nil { +// return nil, err +// } +// return c, nil +// }, +// TestOnBorrow: func(c redis.Conn, t time.Time) error { +// if !sentinel.TestRole(c, "master") { +// return errors.New("Role check failed") +// } else { +// return nil +// } +// }, +// } +// } +type Sentinel struct { + // Addrs is a slice with known Sentinel addresses. + Addrs []string + + // MasterName is a name of Redis master Sentinel servers monitor. + MasterName string + + // Dial is a user supplied function to connect to Sentinel on given address. This + // address will be chosen from Addrs slice. + // Note that as per the redis-sentinel client guidelines, a timeout is mandatory + // while connecting to Sentinels, and should not be set to 0. + Dial func(addr string) (redis.Conn, error) + + // Pool is a user supplied function returning custom connection pool to Sentinel. + // This can be useful to tune options if you are not satisfied with what default + // Sentinel pool offers. See defaultPool() method for default pool implementation. + // In most cases you only need to provide Dial function and let this be nil. + Pool func(addr string) *redis.Pool + + mu sync.RWMutex + pools map[string]*redis.Pool + addr string +} + +// NoSentinelsAvailable is returned when all sentinels in the list are exhausted +// (or none configured), and contains the last error returned by Dial (which +// may be nil) +type NoSentinelsAvailable struct { + lastError error +} + +func (ns NoSentinelsAvailable) Error() string { + if ns.lastError != nil { + return fmt.Sprintf("redigo: no sentinels available; last error: %s", ns.lastError.Error()) + } + return fmt.Sprintf("redigo: no sentinels available") +} + +// putToTop puts Sentinel address to the top of address list - this means +// that all next requests will use Sentinel on this address first. +// +// From Sentinel guidelines: +// +// The first Sentinel replying to the client request should be put at the +// start of the list, so that at the next reconnection, we'll try first +// the Sentinel that was reachable in the previous connection attempt, +// minimizing latency. +// +// Lock must be held by caller. +func (s *Sentinel) putToTop(addr string) { + addrs := s.Addrs + if addrs[0] == addr { + // Already on top. + return + } + newAddrs := []string{addr} + for _, a := range addrs { + if a == addr { + continue + } + newAddrs = append(newAddrs, a) + } + s.Addrs = newAddrs +} + +// putToBottom puts Sentinel address to the bottom of address list. +// We call this method internally when see that some Sentinel failed to answer +// on application request so next time we start with another one. +// +// Lock must be held by caller. +func (s *Sentinel) putToBottom(addr string) { + addrs := s.Addrs + if addrs[len(addrs)-1] == addr { + // Already on bottom. 
+ return + } + newAddrs := []string{} + for _, a := range addrs { + if a == addr { + continue + } + newAddrs = append(newAddrs, a) + } + newAddrs = append(newAddrs, addr) + s.Addrs = newAddrs +} + +// defaultPool returns a connection pool to one Sentinel. This allows +// us to call concurrent requests to Sentinel using connection Do method. +func (s *Sentinel) defaultPool(addr string) *redis.Pool { + return &redis.Pool{ + MaxIdle: 3, + MaxActive: 10, + Wait: true, + IdleTimeout: 240 * time.Second, + Dial: func() (redis.Conn, error) { + return s.Dial(addr) + }, + TestOnBorrow: func(c redis.Conn, t time.Time) error { + _, err := c.Do("PING") + return err + }, + } +} + +func (s *Sentinel) get(addr string) redis.Conn { + pool := s.poolForAddr(addr) + return pool.Get() +} + +func (s *Sentinel) poolForAddr(addr string) *redis.Pool { + s.mu.Lock() + if s.pools == nil { + s.pools = make(map[string]*redis.Pool) + } + pool, ok := s.pools[addr] + if ok { + s.mu.Unlock() + return pool + } + s.mu.Unlock() + newPool := s.newPool(addr) + s.mu.Lock() + p, ok := s.pools[addr] + if ok { + s.mu.Unlock() + return p + } + s.pools[addr] = newPool + s.mu.Unlock() + return newPool +} + +func (s *Sentinel) newPool(addr string) *redis.Pool { + if s.Pool != nil { + return s.Pool(addr) + } + return s.defaultPool(addr) +} + +// close connection pool to Sentinel. +// Lock must be hold by caller. +func (s *Sentinel) close() { + if s.pools != nil { + for _, pool := range s.pools { + pool.Close() + } + } + s.pools = nil +} + +func (s *Sentinel) doUntilSuccess(f func(redis.Conn) (interface{}, error)) (interface{}, error) { + s.mu.RLock() + addrs := s.Addrs + s.mu.RUnlock() + + var lastErr error + + for _, addr := range addrs { + conn := s.get(addr) + reply, err := f(conn) + conn.Close() + if err != nil { + lastErr = err + s.mu.Lock() + s.putToBottom(addr) + s.mu.Unlock() + continue + } + s.putToTop(addr) + return reply, nil + } + + return nil, NoSentinelsAvailable{lastError: lastErr} +} + +// MasterAddr returns an address of current Redis master instance. +func (s *Sentinel) MasterAddr() (string, error) { + res, err := s.doUntilSuccess(func(c redis.Conn) (interface{}, error) { + return queryForMaster(c, s.MasterName) + }) + if err != nil { + return "", err + } + return res.(string), nil +} + +// SlaveAddrs returns a slice with known slave addresses of current master instance. +func (s *Sentinel) SlaveAddrs() ([]string, error) { + res, err := s.doUntilSuccess(func(c redis.Conn) (interface{}, error) { + return queryForSlaveAddrs(c, s.MasterName) + }) + if err != nil { + return nil, err + } + return res.([]string), nil +} + +// Slave represents a Redis slave instance which is known by Sentinel. +type Slave struct { + ip string + port string + flags string +} + +// Addr returns an address of slave. +func (s *Slave) Addr() string { + return net.JoinHostPort(s.ip, s.port) +} + +// Available returns if slave is in working state at moment based on information in slave flags. +func (s *Slave) Available() bool { + return !strings.Contains(s.flags, "disconnected") && !strings.Contains(s.flags, "s_down") +} + +// Slaves returns a slice with known slaves of master instance. +func (s *Sentinel) Slaves() ([]*Slave, error) { + res, err := s.doUntilSuccess(func(c redis.Conn) (interface{}, error) { + return queryForSlaves(c, s.MasterName) + }) + if err != nil { + return nil, err + } + return res.([]*Slave), nil +} + +// SentinelAddrs returns a slice of known Sentinel addresses Sentinel server aware of. 
+func (s *Sentinel) SentinelAddrs() ([]string, error) { + res, err := s.doUntilSuccess(func(c redis.Conn) (interface{}, error) { + return queryForSentinels(c, s.MasterName) + }) + if err != nil { + return nil, err + } + return res.([]string), nil +} + +// Discover allows to update list of known Sentinel addresses. From docs: +// +// A client may update its internal list of Sentinel nodes following this procedure: +// 1) Obtain a list of other Sentinels for this master using the command SENTINEL sentinels . +// 2) Add every ip:port pair not already existing in our list at the end of the list. +func (s *Sentinel) Discover() error { + addrs, err := s.SentinelAddrs() + if err != nil { + return err + } + s.mu.Lock() + for _, addr := range addrs { + if !stringInSlice(addr, s.Addrs) { + s.Addrs = append(s.Addrs, addr) + } + } + s.mu.Unlock() + return nil +} + +// Close closes current connection to Sentinel. +func (s *Sentinel) Close() error { + s.mu.Lock() + s.close() + s.mu.Unlock() + return nil +} + +// TestRole wraps GetRole in a test to verify if the role matches an expected +// role string. If there was any error in querying the supplied connection, +// the function returns false. Works with Redis >= 2.8.12. +// It's not goroutine safe, but if you call this method on pooled connections +// then you are OK. +func TestRole(c redis.Conn, expectedRole string) bool { + role, err := getRole(c) + if err != nil || role != expectedRole { + return false + } + return true +} + +// getRole is a convenience function supplied to query an instance (master or +// slave) for its role. It attempts to use the ROLE command introduced in +// redis 2.8.12. +func getRole(c redis.Conn) (string, error) { + res, err := c.Do("ROLE") + if err != nil { + return "", err + } + rres, ok := res.([]interface{}) + if ok { + return redis.String(rres[0], nil) + } + return "", errors.New("redigo: can not transform ROLE reply to string") +} + +func queryForMaster(conn redis.Conn, masterName string) (string, error) { + res, err := redis.Strings(conn.Do("SENTINEL", "get-master-addr-by-name", masterName)) + if err != nil { + return "", err + } + if len(res) < 2 { + return "", errors.New("redigo: malformed get-master-addr-by-name reply") + } + masterAddr := net.JoinHostPort(res[0], res[1]) + return masterAddr, nil +} + +func queryForSlaveAddrs(conn redis.Conn, masterName string) ([]string, error) { + slaves, err := queryForSlaves(conn, masterName) + if err != nil { + return nil, err + } + slaveAddrs := make([]string, 0) + for _, slave := range slaves { + slaveAddrs = append(slaveAddrs, slave.Addr()) + } + return slaveAddrs, nil +} + +func queryForSlaves(conn redis.Conn, masterName string) ([]*Slave, error) { + res, err := redis.Values(conn.Do("SENTINEL", "slaves", masterName)) + if err != nil { + return nil, err + } + slaves := make([]*Slave, 0) + for _, a := range res { + sm, err := redis.StringMap(a, err) + if err != nil { + return slaves, err + } + slave := &Slave{ + ip: sm["ip"], + port: sm["port"], + flags: sm["flags"], + } + slaves = append(slaves, slave) + } + return slaves, nil +} + +func queryForSentinels(conn redis.Conn, masterName string) ([]string, error) { + res, err := redis.Values(conn.Do("SENTINEL", "sentinels", masterName)) + if err != nil { + return nil, err + } + sentinels := make([]string, 0) + for _, a := range res { + sm, err := redis.StringMap(a, err) + if err != nil { + return sentinels, err + } + sentinels = append(sentinels, fmt.Sprintf("%s:%s", sm["ip"], sm["port"])) + } + return sentinels, nil +} + +func 
stringInSlice(str string, slice []string) bool { + for _, s := range slice { + if s == str { + return true + } + } + return false +} diff --git a/src/vendor/github.com/astaxie/beego/session/redis_sentinel/sess_redis_sentinel.go b/src/vendor/github.com/astaxie/beego/session/redis_sentinel/sess_redis_sentinel.go new file mode 100644 index 000000000..6ecb29770 --- /dev/null +++ b/src/vendor/github.com/astaxie/beego/session/redis_sentinel/sess_redis_sentinel.go @@ -0,0 +1,234 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package redis for session provider +// +// depend on github.com/go-redis/redis +// +// go install github.com/go-redis/redis +// +// Usage: +// import( +// _ "github.com/astaxie/beego/session/redis_sentinel" +// "github.com/astaxie/beego/session" +// ) +// +// func init() { +// globalSessions, _ = session.NewManager("redis_sentinel", ``{"cookieName":"gosessionid","gclifetime":3600,"ProviderConfig":"127.0.0.1:26379;127.0.0.2:26379"}``) +// go globalSessions.GC() +// } +// +// more detail about params: please check the notes on the function SessionInit in this package +package redis_sentinel + +import ( + "github.com/astaxie/beego/session" + "github.com/go-redis/redis" + "net/http" + "strconv" + "strings" + "sync" + "time" +) + +var redispder = &Provider{} + +// DefaultPoolSize redis_sentinel default pool size +var DefaultPoolSize = 100 + +// SessionStore redis_sentinel session store +type SessionStore struct { + p *redis.Client + sid string + lock sync.RWMutex + values map[interface{}]interface{} + maxlifetime int64 +} + +// Set value in redis_sentinel session +func (rs *SessionStore) Set(key, value interface{}) error { + rs.lock.Lock() + defer rs.lock.Unlock() + rs.values[key] = value + return nil +} + +// Get value in redis_sentinel session +func (rs *SessionStore) Get(key interface{}) interface{} { + rs.lock.RLock() + defer rs.lock.RUnlock() + if v, ok := rs.values[key]; ok { + return v + } + return nil +} + +// Delete value in redis_sentinel session +func (rs *SessionStore) Delete(key interface{}) error { + rs.lock.Lock() + defer rs.lock.Unlock() + delete(rs.values, key) + return nil +} + +// Flush clear all values in redis_sentinel session +func (rs *SessionStore) Flush() error { + rs.lock.Lock() + defer rs.lock.Unlock() + rs.values = make(map[interface{}]interface{}) + return nil +} + +// SessionID get redis_sentinel session id +func (rs *SessionStore) SessionID() string { + return rs.sid +} + +// SessionRelease save session values to redis_sentinel +func (rs *SessionStore) SessionRelease(w http.ResponseWriter) { + b, err := session.EncodeGob(rs.values) + if err != nil { + return + } + c := rs.p + c.Set(rs.sid, string(b), time.Duration(rs.maxlifetime)*time.Second) +} + +// Provider redis_sentinel session provider +type Provider struct { + maxlifetime int64 + savePath string + poolsize int + password string + dbNum int + poollist *redis.Client + masterName string +} + +// SessionInit init 
redis_sentinel session +// savepath like redis sentinel addr,pool size,password,dbnum,masterName +// e.g. 127.0.0.1:26379;127.0.0.2:26379,100,1qaz2wsx,0,mymaster +func (rp *Provider) SessionInit(maxlifetime int64, savePath string) error { + rp.maxlifetime = maxlifetime + configs := strings.Split(savePath, ",") + if len(configs) > 0 { + rp.savePath = configs[0] + } + if len(configs) > 1 { + poolsize, err := strconv.Atoi(configs[1]) + if err != nil || poolsize < 0 { + rp.poolsize = DefaultPoolSize + } else { + rp.poolsize = poolsize + } + } else { + rp.poolsize = DefaultPoolSize + } + if len(configs) > 2 { + rp.password = configs[2] + } + if len(configs) > 3 { + dbnum, err := strconv.Atoi(configs[3]) + if err != nil || dbnum < 0 { + rp.dbNum = 0 + } else { + rp.dbNum = dbnum + } + } else { + rp.dbNum = 0 + } + if len(configs) > 4 { + if configs[4] != "" { + rp.masterName = configs[4] + } else { + rp.masterName = "mymaster" + } + } else { + rp.masterName = "mymaster" + } + + rp.poollist = redis.NewFailoverClient(&redis.FailoverOptions{ + SentinelAddrs: strings.Split(rp.savePath, ";"), + Password: rp.password, + PoolSize: rp.poolsize, + DB: rp.dbNum, + MasterName: rp.masterName, + }) + + return rp.poollist.Ping().Err() +} + +// SessionRead read redis_sentinel session by sid +func (rp *Provider) SessionRead(sid string) (session.Store, error) { + var kv map[interface{}]interface{} + kvs, err := rp.poollist.Get(sid).Result() + if err != nil && err != redis.Nil { + return nil, err + } + if len(kvs) == 0 { + kv = make(map[interface{}]interface{}) + } else { + if kv, err = session.DecodeGob([]byte(kvs)); err != nil { + return nil, err + } + } + + rs := &SessionStore{p: rp.poollist, sid: sid, values: kv, maxlifetime: rp.maxlifetime} + return rs, nil +} + +// SessionExist check redis_sentinel session exist by sid +func (rp *Provider) SessionExist(sid string) bool { + c := rp.poollist + if existed, err := c.Exists(sid).Result(); err != nil || existed == 0 { + return false + } + return true +} + +// SessionRegenerate generate new sid for redis_sentinel session +func (rp *Provider) SessionRegenerate(oldsid, sid string) (session.Store, error) { + c := rp.poollist + + if existed, err := c.Exists(oldsid).Result(); err != nil || existed == 0 { + // oldsid doesn't exists, set the new sid directly + // ignore error here, since if it return error + // the existed value will be 0 + c.Set(sid, "", time.Duration(rp.maxlifetime)*time.Second) + } else { + c.Rename(oldsid, sid) + c.Expire(sid, time.Duration(rp.maxlifetime)*time.Second) + } + return rp.SessionRead(sid) +} + +// SessionDestroy delete redis session by id +func (rp *Provider) SessionDestroy(sid string) error { + c := rp.poollist + c.Del(sid) + return nil +} + +// SessionGC Impelment method, no used. +func (rp *Provider) SessionGC() { +} + +// SessionAll return all activeSession +func (rp *Provider) SessionAll() int { + return 0 +} + +func init() { + session.Register("redis_sentinel", redispder) +} diff --git a/src/vendor/github.com/garyburd/redigo/internal/commandinfo.go b/src/vendor/github.com/garyburd/redigo/internal/commandinfo.go deleted file mode 100644 index 11e584257..000000000 --- a/src/vendor/github.com/garyburd/redigo/internal/commandinfo.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2014 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package internal // import "github.com/garyburd/redigo/internal" - -import ( - "strings" -) - -const ( - WatchState = 1 << iota - MultiState - SubscribeState - MonitorState -) - -type CommandInfo struct { - Set, Clear int -} - -var commandInfos = map[string]CommandInfo{ - "WATCH": {Set: WatchState}, - "UNWATCH": {Clear: WatchState}, - "MULTI": {Set: MultiState}, - "EXEC": {Clear: WatchState | MultiState}, - "DISCARD": {Clear: WatchState | MultiState}, - "PSUBSCRIBE": {Set: SubscribeState}, - "SUBSCRIBE": {Set: SubscribeState}, - "MONITOR": {Set: MonitorState}, -} - -func init() { - for n, ci := range commandInfos { - commandInfos[strings.ToLower(n)] = ci - } -} - -func LookupCommandInfo(commandName string) CommandInfo { - if ci, ok := commandInfos[commandName]; ok { - return ci - } - return commandInfos[strings.ToUpper(commandName)] -} diff --git a/src/vendor/github.com/garyburd/redigo/redis/conn.go b/src/vendor/github.com/garyburd/redigo/redis/conn.go deleted file mode 100644 index 5aa0f32f2..000000000 --- a/src/vendor/github.com/garyburd/redigo/redis/conn.go +++ /dev/null @@ -1,673 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "bufio" - "bytes" - "crypto/tls" - "errors" - "fmt" - "io" - "net" - "net/url" - "regexp" - "strconv" - "sync" - "time" -) - -var ( - _ ConnWithTimeout = (*conn)(nil) -) - -// conn is the low-level implementation of Conn -type conn struct { - // Shared - mu sync.Mutex - pending int - err error - conn net.Conn - - // Read - readTimeout time.Duration - br *bufio.Reader - - // Write - writeTimeout time.Duration - bw *bufio.Writer - - // Scratch space for formatting argument length. - // '*' or '$', length, "\r\n" - lenScratch [32]byte - - // Scratch space for formatting integers and floats. - numScratch [40]byte -} - -// DialTimeout acts like Dial but takes timeouts for establishing the -// connection to the server, writing a command and reading a reply. -// -// Deprecated: Use Dial with options instead. -func DialTimeout(network, address string, connectTimeout, readTimeout, writeTimeout time.Duration) (Conn, error) { - return Dial(network, address, - DialConnectTimeout(connectTimeout), - DialReadTimeout(readTimeout), - DialWriteTimeout(writeTimeout)) -} - -// DialOption specifies an option for dialing a Redis server. 
-type DialOption struct { - f func(*dialOptions) -} - -type dialOptions struct { - readTimeout time.Duration - writeTimeout time.Duration - dialer *net.Dialer - dial func(network, addr string) (net.Conn, error) - db int - password string - useTLS bool - skipVerify bool - tlsConfig *tls.Config -} - -// DialReadTimeout specifies the timeout for reading a single command reply. -func DialReadTimeout(d time.Duration) DialOption { - return DialOption{func(do *dialOptions) { - do.readTimeout = d - }} -} - -// DialWriteTimeout specifies the timeout for writing a single command. -func DialWriteTimeout(d time.Duration) DialOption { - return DialOption{func(do *dialOptions) { - do.writeTimeout = d - }} -} - -// DialConnectTimeout specifies the timeout for connecting to the Redis server when -// no DialNetDial option is specified. -func DialConnectTimeout(d time.Duration) DialOption { - return DialOption{func(do *dialOptions) { - do.dialer.Timeout = d - }} -} - -// DialKeepAlive specifies the keep-alive period for TCP connections to the Redis server -// when no DialNetDial option is specified. -// If zero, keep-alives are not enabled. If no DialKeepAlive option is specified then -// the default of 5 minutes is used to ensure that half-closed TCP sessions are detected. -func DialKeepAlive(d time.Duration) DialOption { - return DialOption{func(do *dialOptions) { - do.dialer.KeepAlive = d - }} -} - -// DialNetDial specifies a custom dial function for creating TCP -// connections, otherwise a net.Dialer customized via the other options is used. -// DialNetDial overrides DialConnectTimeout and DialKeepAlive. -func DialNetDial(dial func(network, addr string) (net.Conn, error)) DialOption { - return DialOption{func(do *dialOptions) { - do.dial = dial - }} -} - -// DialDatabase specifies the database to select when dialing a connection. -func DialDatabase(db int) DialOption { - return DialOption{func(do *dialOptions) { - do.db = db - }} -} - -// DialPassword specifies the password to use when connecting to -// the Redis server. -func DialPassword(password string) DialOption { - return DialOption{func(do *dialOptions) { - do.password = password - }} -} - -// DialTLSConfig specifies the config to use when a TLS connection is dialed. -// Has no effect when not dialing a TLS connection. -func DialTLSConfig(c *tls.Config) DialOption { - return DialOption{func(do *dialOptions) { - do.tlsConfig = c - }} -} - -// DialTLSSkipVerify disables server name verification when connecting over -// TLS. Has no effect when not dialing a TLS connection. -func DialTLSSkipVerify(skip bool) DialOption { - return DialOption{func(do *dialOptions) { - do.skipVerify = skip - }} -} - -// DialUseTLS specifies whether TLS should be used when connecting to the -// server. This option is ignore by DialURL. -func DialUseTLS(useTLS bool) DialOption { - return DialOption{func(do *dialOptions) { - do.useTLS = useTLS - }} -} - -// Dial connects to the Redis server at the given network and -// address using the specified options. 
-func Dial(network, address string, options ...DialOption) (Conn, error) { - do := dialOptions{ - dialer: &net.Dialer{ - KeepAlive: time.Minute * 5, - }, - } - for _, option := range options { - option.f(&do) - } - if do.dial == nil { - do.dial = do.dialer.Dial - } - - netConn, err := do.dial(network, address) - if err != nil { - return nil, err - } - - if do.useTLS { - var tlsConfig *tls.Config - if do.tlsConfig == nil { - tlsConfig = &tls.Config{InsecureSkipVerify: do.skipVerify} - } else { - tlsConfig = cloneTLSConfig(do.tlsConfig) - } - if tlsConfig.ServerName == "" { - host, _, err := net.SplitHostPort(address) - if err != nil { - netConn.Close() - return nil, err - } - tlsConfig.ServerName = host - } - - tlsConn := tls.Client(netConn, tlsConfig) - if err := tlsConn.Handshake(); err != nil { - netConn.Close() - return nil, err - } - netConn = tlsConn - } - - c := &conn{ - conn: netConn, - bw: bufio.NewWriter(netConn), - br: bufio.NewReader(netConn), - readTimeout: do.readTimeout, - writeTimeout: do.writeTimeout, - } - - if do.password != "" { - if _, err := c.Do("AUTH", do.password); err != nil { - netConn.Close() - return nil, err - } - } - - if do.db != 0 { - if _, err := c.Do("SELECT", do.db); err != nil { - netConn.Close() - return nil, err - } - } - - return c, nil -} - -var pathDBRegexp = regexp.MustCompile(`/(\d*)\z`) - -// DialURL connects to a Redis server at the given URL using the Redis -// URI scheme. URLs should follow the draft IANA specification for the -// scheme (https://www.iana.org/assignments/uri-schemes/prov/redis). -func DialURL(rawurl string, options ...DialOption) (Conn, error) { - u, err := url.Parse(rawurl) - if err != nil { - return nil, err - } - - if u.Scheme != "redis" && u.Scheme != "rediss" { - return nil, fmt.Errorf("invalid redis URL scheme: %s", u.Scheme) - } - - // As per the IANA draft spec, the host defaults to localhost and - // the port defaults to 6379. - host, port, err := net.SplitHostPort(u.Host) - if err != nil { - // assume port is missing - host = u.Host - port = "6379" - } - if host == "" { - host = "localhost" - } - address := net.JoinHostPort(host, port) - - if u.User != nil { - password, isSet := u.User.Password() - if isSet { - options = append(options, DialPassword(password)) - } - } - - match := pathDBRegexp.FindStringSubmatch(u.Path) - if len(match) == 2 { - db := 0 - if len(match[1]) > 0 { - db, err = strconv.Atoi(match[1]) - if err != nil { - return nil, fmt.Errorf("invalid database: %s", u.Path[1:]) - } - } - if db != 0 { - options = append(options, DialDatabase(db)) - } - } else if u.Path != "" { - return nil, fmt.Errorf("invalid database: %s", u.Path[1:]) - } - - options = append(options, DialUseTLS(u.Scheme == "rediss")) - - return Dial("tcp", address, options...) -} - -// NewConn returns a new Redigo connection for the given net connection. -func NewConn(netConn net.Conn, readTimeout, writeTimeout time.Duration) Conn { - return &conn{ - conn: netConn, - bw: bufio.NewWriter(netConn), - br: bufio.NewReader(netConn), - readTimeout: readTimeout, - writeTimeout: writeTimeout, - } -} - -func (c *conn) Close() error { - c.mu.Lock() - err := c.err - if c.err == nil { - c.err = errors.New("redigo: closed") - err = c.conn.Close() - } - c.mu.Unlock() - return err -} - -func (c *conn) fatal(err error) error { - c.mu.Lock() - if c.err == nil { - c.err = err - // Close connection to force errors on subsequent calls and to unblock - // other reader or writer. 
- c.conn.Close() - } - c.mu.Unlock() - return err -} - -func (c *conn) Err() error { - c.mu.Lock() - err := c.err - c.mu.Unlock() - return err -} - -func (c *conn) writeLen(prefix byte, n int) error { - c.lenScratch[len(c.lenScratch)-1] = '\n' - c.lenScratch[len(c.lenScratch)-2] = '\r' - i := len(c.lenScratch) - 3 - for { - c.lenScratch[i] = byte('0' + n%10) - i -= 1 - n = n / 10 - if n == 0 { - break - } - } - c.lenScratch[i] = prefix - _, err := c.bw.Write(c.lenScratch[i:]) - return err -} - -func (c *conn) writeString(s string) error { - c.writeLen('$', len(s)) - c.bw.WriteString(s) - _, err := c.bw.WriteString("\r\n") - return err -} - -func (c *conn) writeBytes(p []byte) error { - c.writeLen('$', len(p)) - c.bw.Write(p) - _, err := c.bw.WriteString("\r\n") - return err -} - -func (c *conn) writeInt64(n int64) error { - return c.writeBytes(strconv.AppendInt(c.numScratch[:0], n, 10)) -} - -func (c *conn) writeFloat64(n float64) error { - return c.writeBytes(strconv.AppendFloat(c.numScratch[:0], n, 'g', -1, 64)) -} - -func (c *conn) writeCommand(cmd string, args []interface{}) error { - c.writeLen('*', 1+len(args)) - if err := c.writeString(cmd); err != nil { - return err - } - for _, arg := range args { - if err := c.writeArg(arg, true); err != nil { - return err - } - } - return nil -} - -func (c *conn) writeArg(arg interface{}, argumentTypeOK bool) (err error) { - switch arg := arg.(type) { - case string: - return c.writeString(arg) - case []byte: - return c.writeBytes(arg) - case int: - return c.writeInt64(int64(arg)) - case int64: - return c.writeInt64(arg) - case float64: - return c.writeFloat64(arg) - case bool: - if arg { - return c.writeString("1") - } else { - return c.writeString("0") - } - case nil: - return c.writeString("") - case Argument: - if argumentTypeOK { - return c.writeArg(arg.RedisArg(), false) - } - // See comment in default clause below. - var buf bytes.Buffer - fmt.Fprint(&buf, arg) - return c.writeBytes(buf.Bytes()) - default: - // This default clause is intended to handle builtin numeric types. - // The function should return an error for other types, but this is not - // done for compatibility with previous versions of the package. - var buf bytes.Buffer - fmt.Fprint(&buf, arg) - return c.writeBytes(buf.Bytes()) - } -} - -type protocolError string - -func (pe protocolError) Error() string { - return fmt.Sprintf("redigo: %s (possible server error or unsupported concurrent read by application)", string(pe)) -} - -func (c *conn) readLine() ([]byte, error) { - p, err := c.br.ReadSlice('\n') - if err == bufio.ErrBufferFull { - return nil, protocolError("long response line") - } - if err != nil { - return nil, err - } - i := len(p) - 2 - if i < 0 || p[i] != '\r' { - return nil, protocolError("bad response line terminator") - } - return p[:i], nil -} - -// parseLen parses bulk string and array lengths. -func parseLen(p []byte) (int, error) { - if len(p) == 0 { - return -1, protocolError("malformed length") - } - - if p[0] == '-' && len(p) == 2 && p[1] == '1' { - // handle $-1 and $-1 null replies. - return -1, nil - } - - var n int - for _, b := range p { - n *= 10 - if b < '0' || b > '9' { - return -1, protocolError("illegal bytes in length") - } - n += int(b - '0') - } - - return n, nil -} - -// parseInt parses an integer reply. 
-func parseInt(p []byte) (interface{}, error) { - if len(p) == 0 { - return 0, protocolError("malformed integer") - } - - var negate bool - if p[0] == '-' { - negate = true - p = p[1:] - if len(p) == 0 { - return 0, protocolError("malformed integer") - } - } - - var n int64 - for _, b := range p { - n *= 10 - if b < '0' || b > '9' { - return 0, protocolError("illegal bytes in length") - } - n += int64(b - '0') - } - - if negate { - n = -n - } - return n, nil -} - -var ( - okReply interface{} = "OK" - pongReply interface{} = "PONG" -) - -func (c *conn) readReply() (interface{}, error) { - line, err := c.readLine() - if err != nil { - return nil, err - } - if len(line) == 0 { - return nil, protocolError("short response line") - } - switch line[0] { - case '+': - switch { - case len(line) == 3 && line[1] == 'O' && line[2] == 'K': - // Avoid allocation for frequent "+OK" response. - return okReply, nil - case len(line) == 5 && line[1] == 'P' && line[2] == 'O' && line[3] == 'N' && line[4] == 'G': - // Avoid allocation in PING command benchmarks :) - return pongReply, nil - default: - return string(line[1:]), nil - } - case '-': - return Error(string(line[1:])), nil - case ':': - return parseInt(line[1:]) - case '$': - n, err := parseLen(line[1:]) - if n < 0 || err != nil { - return nil, err - } - p := make([]byte, n) - _, err = io.ReadFull(c.br, p) - if err != nil { - return nil, err - } - if line, err := c.readLine(); err != nil { - return nil, err - } else if len(line) != 0 { - return nil, protocolError("bad bulk string format") - } - return p, nil - case '*': - n, err := parseLen(line[1:]) - if n < 0 || err != nil { - return nil, err - } - r := make([]interface{}, n) - for i := range r { - r[i], err = c.readReply() - if err != nil { - return nil, err - } - } - return r, nil - } - return nil, protocolError("unexpected response line") -} - -func (c *conn) Send(cmd string, args ...interface{}) error { - c.mu.Lock() - c.pending += 1 - c.mu.Unlock() - if c.writeTimeout != 0 { - c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) - } - if err := c.writeCommand(cmd, args); err != nil { - return c.fatal(err) - } - return nil -} - -func (c *conn) Flush() error { - if c.writeTimeout != 0 { - c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) - } - if err := c.bw.Flush(); err != nil { - return c.fatal(err) - } - return nil -} - -func (c *conn) Receive() (interface{}, error) { - return c.ReceiveWithTimeout(c.readTimeout) -} - -func (c *conn) ReceiveWithTimeout(timeout time.Duration) (reply interface{}, err error) { - var deadline time.Time - if timeout != 0 { - deadline = time.Now().Add(timeout) - } - c.conn.SetReadDeadline(deadline) - - if reply, err = c.readReply(); err != nil { - return nil, c.fatal(err) - } - // When using pub/sub, the number of receives can be greater than the - // number of sends. To enable normal use of the connection after - // unsubscribing from all channels, we do not decrement pending to a - // negative value. - // - // The pending field is decremented after the reply is read to handle the - // case where Receive is called before Send. - c.mu.Lock() - if c.pending > 0 { - c.pending -= 1 - } - c.mu.Unlock() - if err, ok := reply.(Error); ok { - return nil, err - } - return -} - -func (c *conn) Do(cmd string, args ...interface{}) (interface{}, error) { - return c.DoWithTimeout(c.readTimeout, cmd, args...) 
-} - -func (c *conn) DoWithTimeout(readTimeout time.Duration, cmd string, args ...interface{}) (interface{}, error) { - c.mu.Lock() - pending := c.pending - c.pending = 0 - c.mu.Unlock() - - if cmd == "" && pending == 0 { - return nil, nil - } - - if c.writeTimeout != 0 { - c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) - } - - if cmd != "" { - if err := c.writeCommand(cmd, args); err != nil { - return nil, c.fatal(err) - } - } - - if err := c.bw.Flush(); err != nil { - return nil, c.fatal(err) - } - - var deadline time.Time - if readTimeout != 0 { - deadline = time.Now().Add(readTimeout) - } - c.conn.SetReadDeadline(deadline) - - if cmd == "" { - reply := make([]interface{}, pending) - for i := range reply { - r, e := c.readReply() - if e != nil { - return nil, c.fatal(e) - } - reply[i] = r - } - return reply, nil - } - - var err error - var reply interface{} - for i := 0; i <= pending; i++ { - var e error - if reply, e = c.readReply(); e != nil { - return nil, c.fatal(e) - } - if e, ok := reply.(Error); ok && err == nil { - err = e - } - } - return reply, err -} diff --git a/src/vendor/github.com/garyburd/redigo/redis/doc.go b/src/vendor/github.com/garyburd/redigo/redis/doc.go deleted file mode 100644 index 1d19c1668..000000000 --- a/src/vendor/github.com/garyburd/redigo/redis/doc.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -// Package redis is a client for the Redis database. -// -// The Redigo FAQ (https://github.com/garyburd/redigo/wiki/FAQ) contains more -// documentation about this package. -// -// Connections -// -// The Conn interface is the primary interface for working with Redis. -// Applications create connections by calling the Dial, DialWithTimeout or -// NewConn functions. In the future, functions will be added for creating -// sharded and other types of connections. -// -// The application must call the connection Close method when the application -// is done with the connection. -// -// Executing Commands -// -// The Conn interface has a generic method for executing Redis commands: -// -// Do(commandName string, args ...interface{}) (reply interface{}, err error) -// -// The Redis command reference (http://redis.io/commands) lists the available -// commands. An example of using the Redis APPEND command is: -// -// n, err := conn.Do("APPEND", "key", "value") -// -// The Do method converts command arguments to bulk strings for transmission -// to the server as follows: -// -// Go Type Conversion -// []byte Sent as is -// string Sent as is -// int, int64 strconv.FormatInt(v) -// float64 strconv.FormatFloat(v, 'g', -1, 64) -// bool true -> "1", false -> "0" -// nil "" -// all other types fmt.Fprint(w, v) -// -// Redis command reply types are represented using the following Go types: -// -// Redis type Go type -// error redis.Error -// integer int64 -// simple string string -// bulk string []byte or nil if value not present. -// array []interface{} or nil if value not present. 
-// -// Use type assertions or the reply helper functions to convert from -// interface{} to the specific Go type for the command result. -// -// Pipelining -// -// Connections support pipelining using the Send, Flush and Receive methods. -// -// Send(commandName string, args ...interface{}) error -// Flush() error -// Receive() (reply interface{}, err error) -// -// Send writes the command to the connection's output buffer. Flush flushes the -// connection's output buffer to the server. Receive reads a single reply from -// the server. The following example shows a simple pipeline. -// -// c.Send("SET", "foo", "bar") -// c.Send("GET", "foo") -// c.Flush() -// c.Receive() // reply from SET -// v, err = c.Receive() // reply from GET -// -// The Do method combines the functionality of the Send, Flush and Receive -// methods. The Do method starts by writing the command and flushing the output -// buffer. Next, the Do method receives all pending replies including the reply -// for the command just sent by Do. If any of the received replies is an error, -// then Do returns the error. If there are no errors, then Do returns the last -// reply. If the command argument to the Do method is "", then the Do method -// will flush the output buffer and receive pending replies without sending a -// command. -// -// Use the Send and Do methods to implement pipelined transactions. -// -// c.Send("MULTI") -// c.Send("INCR", "foo") -// c.Send("INCR", "bar") -// r, err := c.Do("EXEC") -// fmt.Println(r) // prints [1, 1] -// -// Concurrency -// -// Connections support one concurrent caller to the Receive method and one -// concurrent caller to the Send and Flush methods. No other concurrency is -// supported including concurrent calls to the Do method. -// -// For full concurrent access to Redis, use the thread-safe Pool to get, use -// and release a connection from within a goroutine. Connections returned from -// a Pool have the concurrency restrictions described in the previous -// paragraph. -// -// Publish and Subscribe -// -// Use the Send, Flush and Receive methods to implement Pub/Sub subscribers. -// -// c.Send("SUBSCRIBE", "example") -// c.Flush() -// for { -// reply, err := c.Receive() -// if err != nil { -// return err -// } -// // process pushed message -// } -// -// The PubSubConn type wraps a Conn with convenience methods for implementing -// subscribers. The Subscribe, PSubscribe, Unsubscribe and PUnsubscribe methods -// send and flush a subscription management command. The receive method -// converts a pushed message to convenient types for use in a type switch. -// -// psc := redis.PubSubConn{Conn: c} -// psc.Subscribe("example") -// for { -// switch v := psc.Receive().(type) { -// case redis.Message: -// fmt.Printf("%s: message: %s\n", v.Channel, v.Data) -// case redis.Subscription: -// fmt.Printf("%s: %s %d\n", v.Channel, v.Kind, v.Count) -// case error: -// return v -// } -// } -// -// Reply Helpers -// -// The Bool, Int, Bytes, String, Strings and Values functions convert a reply -// to a value of a specific type. To allow convenient wrapping of calls to the -// connection Do and Receive methods, the functions take a second argument of -// type error. If the error is non-nil, then the helper function returns the -// error. If the error is nil, the function converts the reply to the specified -// type: -// -// exists, err := redis.Bool(c.Do("EXISTS", "foo")) -// if err != nil { -// // handle error return from c.Do or type conversion error. 
-// } -// -// The Scan function converts elements of a array reply to Go types: -// -// var value1 int -// var value2 string -// reply, err := redis.Values(c.Do("MGET", "key1", "key2")) -// if err != nil { -// // handle error -// } -// if _, err := redis.Scan(reply, &value1, &value2); err != nil { -// // handle error -// } -// -// Errors -// -// Connection methods return error replies from the server as type redis.Error. -// -// Call the connection Err() method to determine if the connection encountered -// non-recoverable error such as a network error or protocol parsing error. If -// Err() returns a non-nil value, then the connection is not usable and should -// be closed. -package redis // import "github.com/garyburd/redigo/redis" diff --git a/src/vendor/github.com/garyburd/redigo/redis/go16.go b/src/vendor/github.com/garyburd/redigo/redis/go16.go deleted file mode 100644 index f6b1a7ccd..000000000 --- a/src/vendor/github.com/garyburd/redigo/redis/go16.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build !go1.7 - -package redis - -import "crypto/tls" - -func cloneTLSConfig(cfg *tls.Config) *tls.Config { - return &tls.Config{ - Rand: cfg.Rand, - Time: cfg.Time, - Certificates: cfg.Certificates, - NameToCertificate: cfg.NameToCertificate, - GetCertificate: cfg.GetCertificate, - RootCAs: cfg.RootCAs, - NextProtos: cfg.NextProtos, - ServerName: cfg.ServerName, - ClientAuth: cfg.ClientAuth, - ClientCAs: cfg.ClientCAs, - InsecureSkipVerify: cfg.InsecureSkipVerify, - CipherSuites: cfg.CipherSuites, - PreferServerCipherSuites: cfg.PreferServerCipherSuites, - ClientSessionCache: cfg.ClientSessionCache, - MinVersion: cfg.MinVersion, - MaxVersion: cfg.MaxVersion, - CurvePreferences: cfg.CurvePreferences, - } -} diff --git a/src/vendor/github.com/garyburd/redigo/redis/go17.go b/src/vendor/github.com/garyburd/redigo/redis/go17.go deleted file mode 100644 index 5f3637911..000000000 --- a/src/vendor/github.com/garyburd/redigo/redis/go17.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build go1.7,!go1.8 - -package redis - -import "crypto/tls" - -func cloneTLSConfig(cfg *tls.Config) *tls.Config { - return &tls.Config{ - Rand: cfg.Rand, - Time: cfg.Time, - Certificates: cfg.Certificates, - NameToCertificate: cfg.NameToCertificate, - GetCertificate: cfg.GetCertificate, - RootCAs: cfg.RootCAs, - NextProtos: cfg.NextProtos, - ServerName: cfg.ServerName, - ClientAuth: cfg.ClientAuth, - ClientCAs: cfg.ClientCAs, - InsecureSkipVerify: cfg.InsecureSkipVerify, - CipherSuites: cfg.CipherSuites, - PreferServerCipherSuites: cfg.PreferServerCipherSuites, - ClientSessionCache: cfg.ClientSessionCache, - MinVersion: cfg.MinVersion, - MaxVersion: cfg.MaxVersion, - CurvePreferences: cfg.CurvePreferences, - DynamicRecordSizingDisabled: cfg.DynamicRecordSizingDisabled, - Renegotiation: cfg.Renegotiation, - } -} diff --git a/src/vendor/github.com/garyburd/redigo/redis/go18.go b/src/vendor/github.com/garyburd/redigo/redis/go18.go deleted file mode 100644 index 558363be3..000000000 --- a/src/vendor/github.com/garyburd/redigo/redis/go18.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build go1.8 - -package redis - -import "crypto/tls" - -func cloneTLSConfig(cfg *tls.Config) *tls.Config { - return cfg.Clone() -} diff --git a/src/vendor/github.com/garyburd/redigo/redis/log.go b/src/vendor/github.com/garyburd/redigo/redis/log.go deleted file mode 100644 index b2996611c..000000000 --- a/src/vendor/github.com/garyburd/redigo/redis/log.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the 
"License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "bytes" - "fmt" - "log" - "time" -) - -var ( - _ ConnWithTimeout = (*loggingConn)(nil) -) - -// NewLoggingConn returns a logging wrapper around a connection. -func NewLoggingConn(conn Conn, logger *log.Logger, prefix string) Conn { - if prefix != "" { - prefix = prefix + "." - } - return &loggingConn{conn, logger, prefix} -} - -type loggingConn struct { - Conn - logger *log.Logger - prefix string -} - -func (c *loggingConn) Close() error { - err := c.Conn.Close() - var buf bytes.Buffer - fmt.Fprintf(&buf, "%sClose() -> (%v)", c.prefix, err) - c.logger.Output(2, buf.String()) - return err -} - -func (c *loggingConn) printValue(buf *bytes.Buffer, v interface{}) { - const chop = 32 - switch v := v.(type) { - case []byte: - if len(v) > chop { - fmt.Fprintf(buf, "%q...", v[:chop]) - } else { - fmt.Fprintf(buf, "%q", v) - } - case string: - if len(v) > chop { - fmt.Fprintf(buf, "%q...", v[:chop]) - } else { - fmt.Fprintf(buf, "%q", v) - } - case []interface{}: - if len(v) == 0 { - buf.WriteString("[]") - } else { - sep := "[" - fin := "]" - if len(v) > chop { - v = v[:chop] - fin = "...]" - } - for _, vv := range v { - buf.WriteString(sep) - c.printValue(buf, vv) - sep = ", " - } - buf.WriteString(fin) - } - default: - fmt.Fprint(buf, v) - } -} - -func (c *loggingConn) print(method, commandName string, args []interface{}, reply interface{}, err error) { - var buf bytes.Buffer - fmt.Fprintf(&buf, "%s%s(", c.prefix, method) - if method != "Receive" { - buf.WriteString(commandName) - for _, arg := range args { - buf.WriteString(", ") - c.printValue(&buf, arg) - } - } - buf.WriteString(") -> (") - if method != "Send" { - c.printValue(&buf, reply) - buf.WriteString(", ") - } - fmt.Fprintf(&buf, "%v)", err) - c.logger.Output(3, buf.String()) -} - -func (c *loggingConn) Do(commandName string, args ...interface{}) (interface{}, error) { - reply, err := c.Conn.Do(commandName, args...) - c.print("Do", commandName, args, reply, err) - return reply, err -} - -func (c *loggingConn) DoWithTimeout(timeout time.Duration, commandName string, args ...interface{}) (interface{}, error) { - reply, err := DoWithTimeout(c.Conn, timeout, commandName, args...) - c.print("DoWithTimeout", commandName, args, reply, err) - return reply, err -} - -func (c *loggingConn) Send(commandName string, args ...interface{}) error { - err := c.Conn.Send(commandName, args...) 
- c.print("Send", commandName, args, nil, err) - return err -} - -func (c *loggingConn) Receive() (interface{}, error) { - reply, err := c.Conn.Receive() - c.print("Receive", "", nil, reply, err) - return reply, err -} - -func (c *loggingConn) ReceiveWithTimeout(timeout time.Duration) (interface{}, error) { - reply, err := ReceiveWithTimeout(c.Conn, timeout) - c.print("ReceiveWithTimeout", "", nil, reply, err) - return reply, err -} diff --git a/src/vendor/github.com/garyburd/redigo/redis/pool.go b/src/vendor/github.com/garyburd/redigo/redis/pool.go deleted file mode 100644 index 3e6f4260a..000000000 --- a/src/vendor/github.com/garyburd/redigo/redis/pool.go +++ /dev/null @@ -1,527 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "bytes" - "crypto/rand" - "crypto/sha1" - "errors" - "io" - "strconv" - "sync" - "sync/atomic" - "time" - - "github.com/garyburd/redigo/internal" -) - -var ( - _ ConnWithTimeout = (*pooledConnection)(nil) - _ ConnWithTimeout = (*errorConnection)(nil) -) - -var nowFunc = time.Now // for testing - -// ErrPoolExhausted is returned from a pool connection method (Do, Send, -// Receive, Flush, Err) when the maximum number of database connections in the -// pool has been reached. -var ErrPoolExhausted = errors.New("redigo: connection pool exhausted") - -var ( - errPoolClosed = errors.New("redigo: connection pool closed") - errConnClosed = errors.New("redigo: connection closed") -) - -// Pool maintains a pool of connections. The application calls the Get method -// to get a connection from the pool and the connection's Close method to -// return the connection's resources to the pool. -// -// The following example shows how to use a pool in a web application. The -// application creates a pool at application startup and makes it available to -// request handlers using a package level variable. The pool configuration used -// here is an example, not a recommendation. -// -// func newPool(addr string) *redis.Pool { -// return &redis.Pool{ -// MaxIdle: 3, -// IdleTimeout: 240 * time.Second, -// Dial: func () (redis.Conn, error) { return redis.Dial("tcp", addr) }, -// } -// } -// -// var ( -// pool *redis.Pool -// redisServer = flag.String("redisServer", ":6379", "") -// ) -// -// func main() { -// flag.Parse() -// pool = newPool(*redisServer) -// ... -// } -// -// A request handler gets a connection from the pool and closes the connection -// when the handler is done: -// -// func serveHome(w http.ResponseWriter, r *http.Request) { -// conn := pool.Get() -// defer conn.Close() -// ... -// } -// -// Use the Dial function to authenticate connections with the AUTH command or -// select a database with the SELECT command: -// -// pool := &redis.Pool{ -// // Other pool configuration not shown in this example. 
-// Dial: func () (redis.Conn, error) { -// c, err := redis.Dial("tcp", server) -// if err != nil { -// return nil, err -// } -// if _, err := c.Do("AUTH", password); err != nil { -// c.Close() -// return nil, err -// } -// if _, err := c.Do("SELECT", db); err != nil { -// c.Close() -// return nil, err -// } -// return c, nil -// }, -// } -// -// Use the TestOnBorrow function to check the health of an idle connection -// before the connection is returned to the application. This example PINGs -// connections that have been idle more than a minute: -// -// pool := &redis.Pool{ -// // Other pool configuration not shown in this example. -// TestOnBorrow: func(c redis.Conn, t time.Time) error { -// if time.Since(t) < time.Minute { -// return nil -// } -// _, err := c.Do("PING") -// return err -// }, -// } -// -type Pool struct { - // Dial is an application supplied function for creating and configuring a - // connection. - // - // The connection returned from Dial must not be in a special state - // (subscribed to pubsub channel, transaction started, ...). - Dial func() (Conn, error) - - // TestOnBorrow is an optional application supplied function for checking - // the health of an idle connection before the connection is used again by - // the application. Argument t is the time that the connection was returned - // to the pool. If the function returns an error, then the connection is - // closed. - TestOnBorrow func(c Conn, t time.Time) error - - // Maximum number of idle connections in the pool. - MaxIdle int - - // Maximum number of connections allocated by the pool at a given time. - // When zero, there is no limit on the number of connections in the pool. - MaxActive int - - // Close connections after remaining idle for this duration. If the value - // is zero, then idle connections are not closed. Applications should set - // the timeout to a value less than the server's timeout. - IdleTimeout time.Duration - - // If Wait is true and the pool is at the MaxActive limit, then Get() waits - // for a connection to be returned to the pool before returning. - Wait bool - - chInitialized uint32 // set to 1 when field ch is initialized - - mu sync.Mutex // mu protects the following fields - closed bool // set to true when the pool is closed. - active int // the number of open connections in the pool - ch chan struct{} // limits open connections when p.Wait is true - idle idleList // idle connections -} - -// NewPool creates a new pool. -// -// Deprecated: Initialize the Pool directory as shown in the example. -func NewPool(newFn func() (Conn, error), maxIdle int) *Pool { - return &Pool{Dial: newFn, MaxIdle: maxIdle} -} - -// Get gets a connection. The application must close the returned connection. -// This method always returns a valid connection so that applications can defer -// error handling to the first use of the connection. If there is an error -// getting an underlying connection, then the connection Err, Do, Send, Flush -// and Receive methods return that error. -func (p *Pool) Get() Conn { - c, err := p.get(nil) - if err != nil { - return errorConnection{err} - } - return &pooledConnection{p: p, c: c} -} - -// PoolStats contains pool statistics. -type PoolStats struct { - // ActiveCount is the number of connections in the pool. The count includes - // idle connections and connections in use. - ActiveCount int - // IdleCount is the number of idle connections in the pool. - IdleCount int -} - -// Stats returns pool's statistics. 
-func (p *Pool) Stats() PoolStats { - p.mu.Lock() - stats := PoolStats{ - ActiveCount: p.active, - IdleCount: p.idle.count, - } - p.mu.Unlock() - - return stats -} - -// ActiveCount returns the number of connections in the pool. The count -// includes idle connections and connections in use. -func (p *Pool) ActiveCount() int { - p.mu.Lock() - active := p.active - p.mu.Unlock() - return active -} - -// IdleCount returns the number of idle connections in the pool. -func (p *Pool) IdleCount() int { - p.mu.Lock() - idle := p.idle.count - p.mu.Unlock() - return idle -} - -// Close releases the resources used by the pool. -func (p *Pool) Close() error { - p.mu.Lock() - if p.closed { - p.mu.Unlock() - return nil - } - p.closed = true - p.active -= p.idle.count - ic := p.idle.front - p.idle.count = 0 - p.idle.front, p.idle.back = nil, nil - if p.ch != nil { - close(p.ch) - } - p.mu.Unlock() - for ; ic != nil; ic = ic.next { - ic.c.Close() - } - return nil -} - -func (p *Pool) lazyInit() { - // Fast path. - if atomic.LoadUint32(&p.chInitialized) == 1 { - return - } - // Slow path. - p.mu.Lock() - if p.chInitialized == 0 { - p.ch = make(chan struct{}, p.MaxActive) - if p.closed { - close(p.ch) - } else { - for i := 0; i < p.MaxActive; i++ { - p.ch <- struct{}{} - } - } - atomic.StoreUint32(&p.chInitialized, 1) - } - p.mu.Unlock() -} - -// get prunes stale connections and returns a connection from the idle list or -// creates a new connection. -func (p *Pool) get(ctx interface { - Done() <-chan struct{} - Err() error -}) (Conn, error) { - - // Handle limit for p.Wait == true. - if p.Wait && p.MaxActive > 0 { - p.lazyInit() - if ctx == nil { - <-p.ch - } else { - select { - case <-p.ch: - case <-ctx.Done(): - return nil, ctx.Err() - } - } - } - - p.mu.Lock() - - // Prune stale connections at the back of the idle list. - if p.IdleTimeout > 0 { - n := p.idle.count - for i := 0; i < n && p.idle.back != nil && p.idle.back.t.Add(p.IdleTimeout).Before(nowFunc()); i++ { - c := p.idle.back.c - p.idle.popBack() - p.mu.Unlock() - c.Close() - p.mu.Lock() - p.active-- - } - } - - // Get idle connection from the front of idle list. - for p.idle.front != nil { - ic := p.idle.front - p.idle.popFront() - p.mu.Unlock() - if p.TestOnBorrow == nil || p.TestOnBorrow(ic.c, ic.t) == nil { - return ic.c, nil - } - ic.c.Close() - p.mu.Lock() - p.active-- - } - - // Check for pool closed before dialing a new connection. - if p.closed { - p.mu.Unlock() - return nil, errors.New("redigo: get on closed pool") - } - - // Handle limit for p.Wait == false. 
- if !p.Wait && p.MaxActive > 0 && p.active >= p.MaxActive { - p.mu.Unlock() - return nil, ErrPoolExhausted - } - - p.active++ - p.mu.Unlock() - c, err := p.Dial() - if err != nil { - c = nil - p.mu.Lock() - p.active-- - if p.ch != nil && !p.closed { - p.ch <- struct{}{} - } - p.mu.Unlock() - } - return c, err -} - -func (p *Pool) put(c Conn, forceClose bool) error { - p.mu.Lock() - if !p.closed && !forceClose { - p.idle.pushFront(&idleConn{t: nowFunc(), c: c}) - if p.idle.count > p.MaxIdle { - c = p.idle.back.c - p.idle.popBack() - } else { - c = nil - } - } - - if c != nil { - p.mu.Unlock() - c.Close() - p.mu.Lock() - p.active-- - } - - if p.ch != nil && !p.closed { - p.ch <- struct{}{} - } - p.mu.Unlock() - return nil -} - -type pooledConnection struct { - p *Pool - c Conn - state int -} - -var ( - sentinel []byte - sentinelOnce sync.Once -) - -func initSentinel() { - p := make([]byte, 64) - if _, err := rand.Read(p); err == nil { - sentinel = p - } else { - h := sha1.New() - io.WriteString(h, "Oops, rand failed. Use time instead.") - io.WriteString(h, strconv.FormatInt(time.Now().UnixNano(), 10)) - sentinel = h.Sum(nil) - } -} - -func (pc *pooledConnection) Close() error { - c := pc.c - if _, ok := c.(errorConnection); ok { - return nil - } - pc.c = errorConnection{errConnClosed} - - if pc.state&internal.MultiState != 0 { - c.Send("DISCARD") - pc.state &^= (internal.MultiState | internal.WatchState) - } else if pc.state&internal.WatchState != 0 { - c.Send("UNWATCH") - pc.state &^= internal.WatchState - } - if pc.state&internal.SubscribeState != 0 { - c.Send("UNSUBSCRIBE") - c.Send("PUNSUBSCRIBE") - // To detect the end of the message stream, ask the server to echo - // a sentinel value and read until we see that value. - sentinelOnce.Do(initSentinel) - c.Send("ECHO", sentinel) - c.Flush() - for { - p, err := c.Receive() - if err != nil { - break - } - if p, ok := p.([]byte); ok && bytes.Equal(p, sentinel) { - pc.state &^= internal.SubscribeState - break - } - } - } - c.Do("") - pc.p.put(c, pc.state != 0 || c.Err() != nil) - return nil -} - -func (pc *pooledConnection) Err() error { - return pc.c.Err() -} - -func (pc *pooledConnection) Do(commandName string, args ...interface{}) (reply interface{}, err error) { - ci := internal.LookupCommandInfo(commandName) - pc.state = (pc.state | ci.Set) &^ ci.Clear - return pc.c.Do(commandName, args...) -} - -func (pc *pooledConnection) DoWithTimeout(timeout time.Duration, commandName string, args ...interface{}) (reply interface{}, err error) { - cwt, ok := pc.c.(ConnWithTimeout) - if !ok { - return nil, errTimeoutNotSupported - } - ci := internal.LookupCommandInfo(commandName) - pc.state = (pc.state | ci.Set) &^ ci.Clear - return cwt.DoWithTimeout(timeout, commandName, args...) -} - -func (pc *pooledConnection) Send(commandName string, args ...interface{}) error { - ci := internal.LookupCommandInfo(commandName) - pc.state = (pc.state | ci.Set) &^ ci.Clear - return pc.c.Send(commandName, args...) 
-} - -func (pc *pooledConnection) Flush() error { - return pc.c.Flush() -} - -func (pc *pooledConnection) Receive() (reply interface{}, err error) { - return pc.c.Receive() -} - -func (pc *pooledConnection) ReceiveWithTimeout(timeout time.Duration) (reply interface{}, err error) { - cwt, ok := pc.c.(ConnWithTimeout) - if !ok { - return nil, errTimeoutNotSupported - } - return cwt.ReceiveWithTimeout(timeout) -} - -type errorConnection struct{ err error } - -func (ec errorConnection) Do(string, ...interface{}) (interface{}, error) { return nil, ec.err } -func (ec errorConnection) DoWithTimeout(time.Duration, string, ...interface{}) (interface{}, error) { - return nil, ec.err -} -func (ec errorConnection) Send(string, ...interface{}) error { return ec.err } -func (ec errorConnection) Err() error { return ec.err } -func (ec errorConnection) Close() error { return nil } -func (ec errorConnection) Flush() error { return ec.err } -func (ec errorConnection) Receive() (interface{}, error) { return nil, ec.err } -func (ec errorConnection) ReceiveWithTimeout(time.Duration) (interface{}, error) { return nil, ec.err } - -type idleList struct { - count int - front, back *idleConn -} - -type idleConn struct { - c Conn - t time.Time - next, prev *idleConn -} - -func (l *idleList) pushFront(ic *idleConn) { - ic.next = l.front - ic.prev = nil - if l.count == 0 { - l.back = ic - } else { - l.front.prev = ic - } - l.front = ic - l.count++ - return -} - -func (l *idleList) popFront() { - ic := l.front - l.count-- - if l.count == 0 { - l.front, l.back = nil, nil - } else { - ic.next.prev = nil - l.front = ic.next - } - ic.next, ic.prev = nil, nil -} - -func (l *idleList) popBack() { - ic := l.back - l.count-- - if l.count == 0 { - l.front, l.back = nil, nil - } else { - ic.prev.next = nil - l.back = ic.prev - } - ic.next, ic.prev = nil, nil -} diff --git a/src/vendor/github.com/garyburd/redigo/redis/pool17.go b/src/vendor/github.com/garyburd/redigo/redis/pool17.go deleted file mode 100644 index 57a22644f..000000000 --- a/src/vendor/github.com/garyburd/redigo/redis/pool17.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2018 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -// +build go1.7 - -package redis - -import "context" - -// GetContext gets a connection using the provided context. -// -// The provided Context must be non-nil. If the context expires before the -// connection is complete, an error is returned. Any expiration on the context -// will not affect the returned connection. -// -// If the function completes without error, then the application must close the -// returned connection. 
-func (p *Pool) GetContext(ctx context.Context) (Conn, error) { - c, err := p.get(ctx) - if err != nil { - return errorConnection{err}, err - } - return &pooledConnection{p: p, c: c}, nil -} diff --git a/src/vendor/github.com/garyburd/redigo/redis/pubsub.go b/src/vendor/github.com/garyburd/redigo/redis/pubsub.go deleted file mode 100644 index f0ac82532..000000000 --- a/src/vendor/github.com/garyburd/redigo/redis/pubsub.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "errors" - "time" -) - -// Subscription represents a subscribe or unsubscribe notification. -type Subscription struct { - // Kind is "subscribe", "unsubscribe", "psubscribe" or "punsubscribe" - Kind string - - // The channel that was changed. - Channel string - - // The current number of subscriptions for connection. - Count int -} - -// Message represents a message notification. -type Message struct { - // The originating channel. - Channel string - - // The message data. - Data []byte -} - -// PMessage represents a pmessage notification. -type PMessage struct { - // The matched pattern. - Pattern string - - // The originating channel. - Channel string - - // The message data. - Data []byte -} - -// Pong represents a pubsub pong notification. -type Pong struct { - Data string -} - -// PubSubConn wraps a Conn with convenience methods for subscribers. -type PubSubConn struct { - Conn Conn -} - -// Close closes the connection. -func (c PubSubConn) Close() error { - return c.Conn.Close() -} - -// Subscribe subscribes the connection to the specified channels. -func (c PubSubConn) Subscribe(channel ...interface{}) error { - c.Conn.Send("SUBSCRIBE", channel...) - return c.Conn.Flush() -} - -// PSubscribe subscribes the connection to the given patterns. -func (c PubSubConn) PSubscribe(channel ...interface{}) error { - c.Conn.Send("PSUBSCRIBE", channel...) - return c.Conn.Flush() -} - -// Unsubscribe unsubscribes the connection from the given channels, or from all -// of them if none is given. -func (c PubSubConn) Unsubscribe(channel ...interface{}) error { - c.Conn.Send("UNSUBSCRIBE", channel...) - return c.Conn.Flush() -} - -// PUnsubscribe unsubscribes the connection from the given patterns, or from all -// of them if none is given. -func (c PubSubConn) PUnsubscribe(channel ...interface{}) error { - c.Conn.Send("PUNSUBSCRIBE", channel...) - return c.Conn.Flush() -} - -// Ping sends a PING to the server with the specified data. -// -// The connection must be subscribed to at least one channel or pattern when -// calling this method. -func (c PubSubConn) Ping(data string) error { - c.Conn.Send("PING", data) - return c.Conn.Flush() -} - -// Receive returns a pushed message as a Subscription, Message, PMessage, Pong -// or error. The return value is intended to be used directly in a type switch -// as illustrated in the PubSubConn example. 
-func (c PubSubConn) Receive() interface{} { - return c.receiveInternal(c.Conn.Receive()) -} - -// ReceiveWithTimeout is like Receive, but it allows the application to -// override the connection's default timeout. -func (c PubSubConn) ReceiveWithTimeout(timeout time.Duration) interface{} { - return c.receiveInternal(ReceiveWithTimeout(c.Conn, timeout)) -} - -func (c PubSubConn) receiveInternal(replyArg interface{}, errArg error) interface{} { - reply, err := Values(replyArg, errArg) - if err != nil { - return err - } - - var kind string - reply, err = Scan(reply, &kind) - if err != nil { - return err - } - - switch kind { - case "message": - var m Message - if _, err := Scan(reply, &m.Channel, &m.Data); err != nil { - return err - } - return m - case "pmessage": - var pm PMessage - if _, err := Scan(reply, &pm.Pattern, &pm.Channel, &pm.Data); err != nil { - return err - } - return pm - case "subscribe", "psubscribe", "unsubscribe", "punsubscribe": - s := Subscription{Kind: kind} - if _, err := Scan(reply, &s.Channel, &s.Count); err != nil { - return err - } - return s - case "pong": - var p Pong - if _, err := Scan(reply, &p.Data); err != nil { - return err - } - return p - } - return errors.New("redigo: unknown pubsub notification") -} diff --git a/src/vendor/github.com/garyburd/redigo/redis/redis.go b/src/vendor/github.com/garyburd/redigo/redis/redis.go deleted file mode 100644 index 141fa4a91..000000000 --- a/src/vendor/github.com/garyburd/redigo/redis/redis.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "errors" - "time" -) - -// Error represents an error returned in a command reply. -type Error string - -func (err Error) Error() string { return string(err) } - -// Conn represents a connection to a Redis server. -type Conn interface { - // Close closes the connection. - Close() error - - // Err returns a non-nil value when the connection is not usable. - Err() error - - // Do sends a command to the server and returns the received reply. - Do(commandName string, args ...interface{}) (reply interface{}, err error) - - // Send writes the command to the client's output buffer. - Send(commandName string, args ...interface{}) error - - // Flush flushes the output buffer to the Redis server. - Flush() error - - // Receive receives a single reply from the Redis server - Receive() (reply interface{}, err error) -} - -// Argument is the interface implemented by an object which wants to control how -// the object is converted to Redis bulk strings. -type Argument interface { - // RedisArg returns a value to be encoded as a bulk string per the - // conversions listed in the section 'Executing Commands'. - // Implementations should typically return a []byte or string. - RedisArg() interface{} -} - -// Scanner is implemented by an object which wants to control its value is -// interpreted when read from Redis. -type Scanner interface { - // RedisScan assigns a value from a Redis value. 
The argument src is one of - // the reply types listed in the section `Executing Commands`. - // - // An error should be returned if the value cannot be stored without - // loss of information. - RedisScan(src interface{}) error -} - -// ConnWithTimeout is an optional interface that allows the caller to override -// a connection's default read timeout. This interface is useful for executing -// the BLPOP, BRPOP, BRPOPLPUSH, XREAD and other commands that block at the -// server. -// -// A connection's default read timeout is set with the DialReadTimeout dial -// option. Applications should rely on the default timeout for commands that do -// not block at the server. -// -// All of the Conn implementations in this package satisfy the ConnWithTimeout -// interface. -// -// Use the DoWithTimeout and ReceiveWithTimeout helper functions to simplify -// use of this interface. -type ConnWithTimeout interface { - Conn - - // Do sends a command to the server and returns the received reply. - // The timeout overrides the read timeout set when dialing the - // connection. - DoWithTimeout(timeout time.Duration, commandName string, args ...interface{}) (reply interface{}, err error) - - // Receive receives a single reply from the Redis server. The timeout - // overrides the read timeout set when dialing the connection. - ReceiveWithTimeout(timeout time.Duration) (reply interface{}, err error) -} - -var errTimeoutNotSupported = errors.New("redis: connection does not support ConnWithTimeout") - -// DoWithTimeout executes a Redis command with the specified read timeout. If -// the connection does not satisfy the ConnWithTimeout interface, then an error -// is returned. -func DoWithTimeout(c Conn, timeout time.Duration, cmd string, args ...interface{}) (interface{}, error) { - cwt, ok := c.(ConnWithTimeout) - if !ok { - return nil, errTimeoutNotSupported - } - return cwt.DoWithTimeout(timeout, cmd, args...) -} - -// ReceiveWithTimeout receives a reply with the specified read timeout. If the -// connection does not satisfy the ConnWithTimeout interface, then an error is -// returned. -func ReceiveWithTimeout(c Conn, timeout time.Duration) (interface{}, error) { - cwt, ok := c.(ConnWithTimeout) - if !ok { - return nil, errTimeoutNotSupported - } - return cwt.ReceiveWithTimeout(timeout) -} diff --git a/src/vendor/github.com/garyburd/redigo/redis/reply.go b/src/vendor/github.com/garyburd/redigo/redis/reply.go deleted file mode 100644 index c2b3b2b6e..000000000 --- a/src/vendor/github.com/garyburd/redigo/redis/reply.go +++ /dev/null @@ -1,479 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "errors" - "fmt" - "strconv" -) - -// ErrNil indicates that a reply value is nil. -var ErrNil = errors.New("redigo: nil returned") - -// Int is a helper that converts a command reply to an integer. If err is not -// equal to nil, then Int returns 0, err. 
Otherwise, Int converts the -// reply to an int as follows: -// -// Reply type Result -// integer int(reply), nil -// bulk string parsed reply, nil -// nil 0, ErrNil -// other 0, error -func Int(reply interface{}, err error) (int, error) { - if err != nil { - return 0, err - } - switch reply := reply.(type) { - case int64: - x := int(reply) - if int64(x) != reply { - return 0, strconv.ErrRange - } - return x, nil - case []byte: - n, err := strconv.ParseInt(string(reply), 10, 0) - return int(n), err - case nil: - return 0, ErrNil - case Error: - return 0, reply - } - return 0, fmt.Errorf("redigo: unexpected type for Int, got type %T", reply) -} - -// Int64 is a helper that converts a command reply to 64 bit integer. If err is -// not equal to nil, then Int returns 0, err. Otherwise, Int64 converts the -// reply to an int64 as follows: -// -// Reply type Result -// integer reply, nil -// bulk string parsed reply, nil -// nil 0, ErrNil -// other 0, error -func Int64(reply interface{}, err error) (int64, error) { - if err != nil { - return 0, err - } - switch reply := reply.(type) { - case int64: - return reply, nil - case []byte: - n, err := strconv.ParseInt(string(reply), 10, 64) - return n, err - case nil: - return 0, ErrNil - case Error: - return 0, reply - } - return 0, fmt.Errorf("redigo: unexpected type for Int64, got type %T", reply) -} - -var errNegativeInt = errors.New("redigo: unexpected value for Uint64") - -// Uint64 is a helper that converts a command reply to 64 bit integer. If err is -// not equal to nil, then Int returns 0, err. Otherwise, Int64 converts the -// reply to an int64 as follows: -// -// Reply type Result -// integer reply, nil -// bulk string parsed reply, nil -// nil 0, ErrNil -// other 0, error -func Uint64(reply interface{}, err error) (uint64, error) { - if err != nil { - return 0, err - } - switch reply := reply.(type) { - case int64: - if reply < 0 { - return 0, errNegativeInt - } - return uint64(reply), nil - case []byte: - n, err := strconv.ParseUint(string(reply), 10, 64) - return n, err - case nil: - return 0, ErrNil - case Error: - return 0, reply - } - return 0, fmt.Errorf("redigo: unexpected type for Uint64, got type %T", reply) -} - -// Float64 is a helper that converts a command reply to 64 bit float. If err is -// not equal to nil, then Float64 returns 0, err. Otherwise, Float64 converts -// the reply to an int as follows: -// -// Reply type Result -// bulk string parsed reply, nil -// nil 0, ErrNil -// other 0, error -func Float64(reply interface{}, err error) (float64, error) { - if err != nil { - return 0, err - } - switch reply := reply.(type) { - case []byte: - n, err := strconv.ParseFloat(string(reply), 64) - return n, err - case nil: - return 0, ErrNil - case Error: - return 0, reply - } - return 0, fmt.Errorf("redigo: unexpected type for Float64, got type %T", reply) -} - -// String is a helper that converts a command reply to a string. If err is not -// equal to nil, then String returns "", err. 
Otherwise String converts the -// reply to a string as follows: -// -// Reply type Result -// bulk string string(reply), nil -// simple string reply, nil -// nil "", ErrNil -// other "", error -func String(reply interface{}, err error) (string, error) { - if err != nil { - return "", err - } - switch reply := reply.(type) { - case []byte: - return string(reply), nil - case string: - return reply, nil - case nil: - return "", ErrNil - case Error: - return "", reply - } - return "", fmt.Errorf("redigo: unexpected type for String, got type %T", reply) -} - -// Bytes is a helper that converts a command reply to a slice of bytes. If err -// is not equal to nil, then Bytes returns nil, err. Otherwise Bytes converts -// the reply to a slice of bytes as follows: -// -// Reply type Result -// bulk string reply, nil -// simple string []byte(reply), nil -// nil nil, ErrNil -// other nil, error -func Bytes(reply interface{}, err error) ([]byte, error) { - if err != nil { - return nil, err - } - switch reply := reply.(type) { - case []byte: - return reply, nil - case string: - return []byte(reply), nil - case nil: - return nil, ErrNil - case Error: - return nil, reply - } - return nil, fmt.Errorf("redigo: unexpected type for Bytes, got type %T", reply) -} - -// Bool is a helper that converts a command reply to a boolean. If err is not -// equal to nil, then Bool returns false, err. Otherwise Bool converts the -// reply to boolean as follows: -// -// Reply type Result -// integer value != 0, nil -// bulk string strconv.ParseBool(reply) -// nil false, ErrNil -// other false, error -func Bool(reply interface{}, err error) (bool, error) { - if err != nil { - return false, err - } - switch reply := reply.(type) { - case int64: - return reply != 0, nil - case []byte: - return strconv.ParseBool(string(reply)) - case nil: - return false, ErrNil - case Error: - return false, reply - } - return false, fmt.Errorf("redigo: unexpected type for Bool, got type %T", reply) -} - -// MultiBulk is a helper that converts an array command reply to a []interface{}. -// -// Deprecated: Use Values instead. -func MultiBulk(reply interface{}, err error) ([]interface{}, error) { return Values(reply, err) } - -// Values is a helper that converts an array command reply to a []interface{}. -// If err is not equal to nil, then Values returns nil, err. Otherwise, Values -// converts the reply as follows: -// -// Reply type Result -// array reply, nil -// nil nil, ErrNil -// other nil, error -func Values(reply interface{}, err error) ([]interface{}, error) { - if err != nil { - return nil, err - } - switch reply := reply.(type) { - case []interface{}: - return reply, nil - case nil: - return nil, ErrNil - case Error: - return nil, reply - } - return nil, fmt.Errorf("redigo: unexpected type for Values, got type %T", reply) -} - -func sliceHelper(reply interface{}, err error, name string, makeSlice func(int), assign func(int, interface{}) error) error { - if err != nil { - return err - } - switch reply := reply.(type) { - case []interface{}: - makeSlice(len(reply)) - for i := range reply { - if reply[i] == nil { - continue - } - if err := assign(i, reply[i]); err != nil { - return err - } - } - return nil - case nil: - return ErrNil - case Error: - return reply - } - return fmt.Errorf("redigo: unexpected type for %s, got type %T", name, reply) -} - -// Float64s is a helper that converts an array command reply to a []float64. If -// err is not equal to nil, then Float64s returns nil, err. 
Nil array items are -// converted to 0 in the output slice. Floats64 returns an error if an array -// item is not a bulk string or nil. -func Float64s(reply interface{}, err error) ([]float64, error) { - var result []float64 - err = sliceHelper(reply, err, "Float64s", func(n int) { result = make([]float64, n) }, func(i int, v interface{}) error { - p, ok := v.([]byte) - if !ok { - return fmt.Errorf("redigo: unexpected element type for Floats64, got type %T", v) - } - f, err := strconv.ParseFloat(string(p), 64) - result[i] = f - return err - }) - return result, err -} - -// Strings is a helper that converts an array command reply to a []string. If -// err is not equal to nil, then Strings returns nil, err. Nil array items are -// converted to "" in the output slice. Strings returns an error if an array -// item is not a bulk string or nil. -func Strings(reply interface{}, err error) ([]string, error) { - var result []string - err = sliceHelper(reply, err, "Strings", func(n int) { result = make([]string, n) }, func(i int, v interface{}) error { - switch v := v.(type) { - case string: - result[i] = v - return nil - case []byte: - result[i] = string(v) - return nil - default: - return fmt.Errorf("redigo: unexpected element type for Strings, got type %T", v) - } - }) - return result, err -} - -// ByteSlices is a helper that converts an array command reply to a [][]byte. -// If err is not equal to nil, then ByteSlices returns nil, err. Nil array -// items are stay nil. ByteSlices returns an error if an array item is not a -// bulk string or nil. -func ByteSlices(reply interface{}, err error) ([][]byte, error) { - var result [][]byte - err = sliceHelper(reply, err, "ByteSlices", func(n int) { result = make([][]byte, n) }, func(i int, v interface{}) error { - p, ok := v.([]byte) - if !ok { - return fmt.Errorf("redigo: unexpected element type for ByteSlices, got type %T", v) - } - result[i] = p - return nil - }) - return result, err -} - -// Int64s is a helper that converts an array command reply to a []int64. -// If err is not equal to nil, then Int64s returns nil, err. Nil array -// items are stay nil. Int64s returns an error if an array item is not a -// bulk string or nil. -func Int64s(reply interface{}, err error) ([]int64, error) { - var result []int64 - err = sliceHelper(reply, err, "Int64s", func(n int) { result = make([]int64, n) }, func(i int, v interface{}) error { - switch v := v.(type) { - case int64: - result[i] = v - return nil - case []byte: - n, err := strconv.ParseInt(string(v), 10, 64) - result[i] = n - return err - default: - return fmt.Errorf("redigo: unexpected element type for Int64s, got type %T", v) - } - }) - return result, err -} - -// Ints is a helper that converts an array command reply to a []in. -// If err is not equal to nil, then Ints returns nil, err. Nil array -// items are stay nil. Ints returns an error if an array item is not a -// bulk string or nil. 
-func Ints(reply interface{}, err error) ([]int, error) { - var result []int - err = sliceHelper(reply, err, "Ints", func(n int) { result = make([]int, n) }, func(i int, v interface{}) error { - switch v := v.(type) { - case int64: - n := int(v) - if int64(n) != v { - return strconv.ErrRange - } - result[i] = n - return nil - case []byte: - n, err := strconv.Atoi(string(v)) - result[i] = n - return err - default: - return fmt.Errorf("redigo: unexpected element type for Ints, got type %T", v) - } - }) - return result, err -} - -// StringMap is a helper that converts an array of strings (alternating key, value) -// into a map[string]string. The HGETALL and CONFIG GET commands return replies in this format. -// Requires an even number of values in result. -func StringMap(result interface{}, err error) (map[string]string, error) { - values, err := Values(result, err) - if err != nil { - return nil, err - } - if len(values)%2 != 0 { - return nil, errors.New("redigo: StringMap expects even number of values result") - } - m := make(map[string]string, len(values)/2) - for i := 0; i < len(values); i += 2 { - key, okKey := values[i].([]byte) - value, okValue := values[i+1].([]byte) - if !okKey || !okValue { - return nil, errors.New("redigo: StringMap key not a bulk string value") - } - m[string(key)] = string(value) - } - return m, nil -} - -// IntMap is a helper that converts an array of strings (alternating key, value) -// into a map[string]int. The HGETALL commands return replies in this format. -// Requires an even number of values in result. -func IntMap(result interface{}, err error) (map[string]int, error) { - values, err := Values(result, err) - if err != nil { - return nil, err - } - if len(values)%2 != 0 { - return nil, errors.New("redigo: IntMap expects even number of values result") - } - m := make(map[string]int, len(values)/2) - for i := 0; i < len(values); i += 2 { - key, ok := values[i].([]byte) - if !ok { - return nil, errors.New("redigo: IntMap key not a bulk string value") - } - value, err := Int(values[i+1], nil) - if err != nil { - return nil, err - } - m[string(key)] = value - } - return m, nil -} - -// Int64Map is a helper that converts an array of strings (alternating key, value) -// into a map[string]int64. The HGETALL commands return replies in this format. -// Requires an even number of values in result. -func Int64Map(result interface{}, err error) (map[string]int64, error) { - values, err := Values(result, err) - if err != nil { - return nil, err - } - if len(values)%2 != 0 { - return nil, errors.New("redigo: Int64Map expects even number of values result") - } - m := make(map[string]int64, len(values)/2) - for i := 0; i < len(values); i += 2 { - key, ok := values[i].([]byte) - if !ok { - return nil, errors.New("redigo: Int64Map key not a bulk string value") - } - value, err := Int64(values[i+1], nil) - if err != nil { - return nil, err - } - m[string(key)] = value - } - return m, nil -} - -// Positions is a helper that converts an array of positions (lat, long) -// into a [][2]float64. The GEOPOS command returns replies in this format. 
-func Positions(result interface{}, err error) ([]*[2]float64, error) { - values, err := Values(result, err) - if err != nil { - return nil, err - } - positions := make([]*[2]float64, len(values)) - for i := range values { - if values[i] == nil { - continue - } - p, ok := values[i].([]interface{}) - if !ok { - return nil, fmt.Errorf("redigo: unexpected element type for interface slice, got type %T", values[i]) - } - if len(p) != 2 { - return nil, fmt.Errorf("redigo: unexpected number of values for a member position, got %d", len(p)) - } - lat, err := Float64(p[0], nil) - if err != nil { - return nil, err - } - long, err := Float64(p[1], nil) - if err != nil { - return nil, err - } - positions[i] = &[2]float64{lat, long} - } - return positions, nil -} diff --git a/src/vendor/github.com/garyburd/redigo/redis/scan.go b/src/vendor/github.com/garyburd/redigo/redis/scan.go deleted file mode 100644 index ef9551bd4..000000000 --- a/src/vendor/github.com/garyburd/redigo/redis/scan.go +++ /dev/null @@ -1,585 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "sync" -) - -func ensureLen(d reflect.Value, n int) { - if n > d.Cap() { - d.Set(reflect.MakeSlice(d.Type(), n, n)) - } else { - d.SetLen(n) - } -} - -func cannotConvert(d reflect.Value, s interface{}) error { - var sname string - switch s.(type) { - case string: - sname = "Redis simple string" - case Error: - sname = "Redis error" - case int64: - sname = "Redis integer" - case []byte: - sname = "Redis bulk string" - case []interface{}: - sname = "Redis array" - default: - sname = reflect.TypeOf(s).String() - } - return fmt.Errorf("cannot convert from %s to %s", sname, d.Type()) -} - -func convertAssignBulkString(d reflect.Value, s []byte) (err error) { - switch d.Type().Kind() { - case reflect.Float32, reflect.Float64: - var x float64 - x, err = strconv.ParseFloat(string(s), d.Type().Bits()) - d.SetFloat(x) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - var x int64 - x, err = strconv.ParseInt(string(s), 10, d.Type().Bits()) - d.SetInt(x) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - var x uint64 - x, err = strconv.ParseUint(string(s), 10, d.Type().Bits()) - d.SetUint(x) - case reflect.Bool: - var x bool - x, err = strconv.ParseBool(string(s)) - d.SetBool(x) - case reflect.String: - d.SetString(string(s)) - case reflect.Slice: - if d.Type().Elem().Kind() != reflect.Uint8 { - err = cannotConvert(d, s) - } else { - d.SetBytes(s) - } - default: - err = cannotConvert(d, s) - } - return -} - -func convertAssignInt(d reflect.Value, s int64) (err error) { - switch d.Type().Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - d.SetInt(s) - if d.Int() != s { - err = strconv.ErrRange - d.SetInt(0) - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - if s < 0 { - err = strconv.ErrRange 
- } else { - x := uint64(s) - d.SetUint(x) - if d.Uint() != x { - err = strconv.ErrRange - d.SetUint(0) - } - } - case reflect.Bool: - d.SetBool(s != 0) - default: - err = cannotConvert(d, s) - } - return -} - -func convertAssignValue(d reflect.Value, s interface{}) (err error) { - if d.Kind() != reflect.Ptr { - if d.CanAddr() { - d2 := d.Addr() - if d2.CanInterface() { - if scanner, ok := d2.Interface().(Scanner); ok { - return scanner.RedisScan(s) - } - } - } - } else if d.CanInterface() { - // Already a reflect.Ptr - if d.IsNil() { - d.Set(reflect.New(d.Type().Elem())) - } - if scanner, ok := d.Interface().(Scanner); ok { - return scanner.RedisScan(s) - } - } - - switch s := s.(type) { - case []byte: - err = convertAssignBulkString(d, s) - case int64: - err = convertAssignInt(d, s) - default: - err = cannotConvert(d, s) - } - return err -} - -func convertAssignArray(d reflect.Value, s []interface{}) error { - if d.Type().Kind() != reflect.Slice { - return cannotConvert(d, s) - } - ensureLen(d, len(s)) - for i := 0; i < len(s); i++ { - if err := convertAssignValue(d.Index(i), s[i]); err != nil { - return err - } - } - return nil -} - -func convertAssign(d interface{}, s interface{}) (err error) { - if scanner, ok := d.(Scanner); ok { - return scanner.RedisScan(s) - } - - // Handle the most common destination types using type switches and - // fall back to reflection for all other types. - switch s := s.(type) { - case nil: - // ignore - case []byte: - switch d := d.(type) { - case *string: - *d = string(s) - case *int: - *d, err = strconv.Atoi(string(s)) - case *bool: - *d, err = strconv.ParseBool(string(s)) - case *[]byte: - *d = s - case *interface{}: - *d = s - case nil: - // skip value - default: - if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr { - err = cannotConvert(d, s) - } else { - err = convertAssignBulkString(d.Elem(), s) - } - } - case int64: - switch d := d.(type) { - case *int: - x := int(s) - if int64(x) != s { - err = strconv.ErrRange - x = 0 - } - *d = x - case *bool: - *d = s != 0 - case *interface{}: - *d = s - case nil: - // skip value - default: - if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr { - err = cannotConvert(d, s) - } else { - err = convertAssignInt(d.Elem(), s) - } - } - case string: - switch d := d.(type) { - case *string: - *d = s - case *interface{}: - *d = s - case nil: - // skip value - default: - err = cannotConvert(reflect.ValueOf(d), s) - } - case []interface{}: - switch d := d.(type) { - case *[]interface{}: - *d = s - case *interface{}: - *d = s - case nil: - // skip value - default: - if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr { - err = cannotConvert(d, s) - } else { - err = convertAssignArray(d.Elem(), s) - } - } - case Error: - err = s - default: - err = cannotConvert(reflect.ValueOf(d), s) - } - return -} - -// Scan copies from src to the values pointed at by dest. -// -// Scan uses RedisScan if available otherwise: -// -// The values pointed at by dest must be an integer, float, boolean, string, -// []byte, interface{} or slices of these types. Scan uses the standard strconv -// package to convert bulk strings to numeric and boolean types. -// -// If a dest value is nil, then the corresponding src value is skipped. -// -// If a src element is nil, then the corresponding dest value is not modified. -// -// To enable easy use of Scan in a loop, Scan returns the slice of src -// following the copied values. 
-func Scan(src []interface{}, dest ...interface{}) ([]interface{}, error) { - if len(src) < len(dest) { - return nil, errors.New("redigo.Scan: array short") - } - var err error - for i, d := range dest { - err = convertAssign(d, src[i]) - if err != nil { - err = fmt.Errorf("redigo.Scan: cannot assign to dest %d: %v", i, err) - break - } - } - return src[len(dest):], err -} - -type fieldSpec struct { - name string - index []int - omitEmpty bool -} - -type structSpec struct { - m map[string]*fieldSpec - l []*fieldSpec -} - -func (ss *structSpec) fieldSpec(name []byte) *fieldSpec { - return ss.m[string(name)] -} - -func compileStructSpec(t reflect.Type, depth map[string]int, index []int, ss *structSpec) { - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - switch { - case f.PkgPath != "" && !f.Anonymous: - // Ignore unexported fields. - case f.Anonymous: - // TODO: Handle pointers. Requires change to decoder and - // protection against infinite recursion. - if f.Type.Kind() == reflect.Struct { - compileStructSpec(f.Type, depth, append(index, i), ss) - } - default: - fs := &fieldSpec{name: f.Name} - tag := f.Tag.Get("redis") - p := strings.Split(tag, ",") - if len(p) > 0 { - if p[0] == "-" { - continue - } - if len(p[0]) > 0 { - fs.name = p[0] - } - for _, s := range p[1:] { - switch s { - case "omitempty": - fs.omitEmpty = true - default: - panic(fmt.Errorf("redigo: unknown field tag %s for type %s", s, t.Name())) - } - } - } - d, found := depth[fs.name] - if !found { - d = 1 << 30 - } - switch { - case len(index) == d: - // At same depth, remove from result. - delete(ss.m, fs.name) - j := 0 - for i := 0; i < len(ss.l); i++ { - if fs.name != ss.l[i].name { - ss.l[j] = ss.l[i] - j += 1 - } - } - ss.l = ss.l[:j] - case len(index) < d: - fs.index = make([]int, len(index)+1) - copy(fs.index, index) - fs.index[len(index)] = i - depth[fs.name] = len(index) - ss.m[fs.name] = fs - ss.l = append(ss.l, fs) - } - } - } -} - -var ( - structSpecMutex sync.RWMutex - structSpecCache = make(map[reflect.Type]*structSpec) - defaultFieldSpec = &fieldSpec{} -) - -func structSpecForType(t reflect.Type) *structSpec { - - structSpecMutex.RLock() - ss, found := structSpecCache[t] - structSpecMutex.RUnlock() - if found { - return ss - } - - structSpecMutex.Lock() - defer structSpecMutex.Unlock() - ss, found = structSpecCache[t] - if found { - return ss - } - - ss = &structSpec{m: make(map[string]*fieldSpec)} - compileStructSpec(t, make(map[string]int), nil, ss) - structSpecCache[t] = ss - return ss -} - -var errScanStructValue = errors.New("redigo.ScanStruct: value must be non-nil pointer to a struct") - -// ScanStruct scans alternating names and values from src to a struct. The -// HGETALL and CONFIG GET commands return replies in this format. -// -// ScanStruct uses exported field names to match values in the response. Use -// 'redis' field tag to override the name: -// -// Field int `redis:"myName"` -// -// Fields with the tag redis:"-" are ignored. -// -// Each field uses RedisScan if available otherwise: -// Integer, float, boolean, string and []byte fields are supported. Scan uses the -// standard strconv package to convert bulk string values to numeric and -// boolean types. -// -// If a src element is nil, then the corresponding field is not modified. 
-func ScanStruct(src []interface{}, dest interface{}) error { - d := reflect.ValueOf(dest) - if d.Kind() != reflect.Ptr || d.IsNil() { - return errScanStructValue - } - d = d.Elem() - if d.Kind() != reflect.Struct { - return errScanStructValue - } - ss := structSpecForType(d.Type()) - - if len(src)%2 != 0 { - return errors.New("redigo.ScanStruct: number of values not a multiple of 2") - } - - for i := 0; i < len(src); i += 2 { - s := src[i+1] - if s == nil { - continue - } - name, ok := src[i].([]byte) - if !ok { - return fmt.Errorf("redigo.ScanStruct: key %d not a bulk string value", i) - } - fs := ss.fieldSpec(name) - if fs == nil { - continue - } - if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil { - return fmt.Errorf("redigo.ScanStruct: cannot assign field %s: %v", fs.name, err) - } - } - return nil -} - -var ( - errScanSliceValue = errors.New("redigo.ScanSlice: dest must be non-nil pointer to a struct") -) - -// ScanSlice scans src to the slice pointed to by dest. The elements the dest -// slice must be integer, float, boolean, string, struct or pointer to struct -// values. -// -// Struct fields must be integer, float, boolean or string values. All struct -// fields are used unless a subset is specified using fieldNames. -func ScanSlice(src []interface{}, dest interface{}, fieldNames ...string) error { - d := reflect.ValueOf(dest) - if d.Kind() != reflect.Ptr || d.IsNil() { - return errScanSliceValue - } - d = d.Elem() - if d.Kind() != reflect.Slice { - return errScanSliceValue - } - - isPtr := false - t := d.Type().Elem() - if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { - isPtr = true - t = t.Elem() - } - - if t.Kind() != reflect.Struct { - ensureLen(d, len(src)) - for i, s := range src { - if s == nil { - continue - } - if err := convertAssignValue(d.Index(i), s); err != nil { - return fmt.Errorf("redigo.ScanSlice: cannot assign element %d: %v", i, err) - } - } - return nil - } - - ss := structSpecForType(t) - fss := ss.l - if len(fieldNames) > 0 { - fss = make([]*fieldSpec, len(fieldNames)) - for i, name := range fieldNames { - fss[i] = ss.m[name] - if fss[i] == nil { - return fmt.Errorf("redigo.ScanSlice: ScanSlice bad field name %s", name) - } - } - } - - if len(fss) == 0 { - return errors.New("redigo.ScanSlice: no struct fields") - } - - n := len(src) / len(fss) - if n*len(fss) != len(src) { - return errors.New("redigo.ScanSlice: length not a multiple of struct field count") - } - - ensureLen(d, n) - for i := 0; i < n; i++ { - d := d.Index(i) - if isPtr { - if d.IsNil() { - d.Set(reflect.New(t)) - } - d = d.Elem() - } - for j, fs := range fss { - s := src[i*len(fss)+j] - if s == nil { - continue - } - if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil { - return fmt.Errorf("redigo.ScanSlice: cannot assign element %d to field %s: %v", i*len(fss)+j, fs.name, err) - } - } - } - return nil -} - -// Args is a helper for constructing command arguments from structured values. -type Args []interface{} - -// Add returns the result of appending value to args. -func (args Args) Add(value ...interface{}) Args { - return append(args, value...) -} - -// AddFlat returns the result of appending the flattened value of v to args. -// -// Maps are flattened by appending the alternating keys and map values to args. -// -// Slices are flattened by appending the slice elements to args. -// -// Structs are flattened by appending the alternating names and values of -// exported fields to args. 
If v is a nil struct pointer, then nothing is -// appended. The 'redis' field tag overrides struct field names. See ScanStruct -// for more information on the use of the 'redis' field tag. -// -// Other types are appended to args as is. -func (args Args) AddFlat(v interface{}) Args { - rv := reflect.ValueOf(v) - switch rv.Kind() { - case reflect.Struct: - args = flattenStruct(args, rv) - case reflect.Slice: - for i := 0; i < rv.Len(); i++ { - args = append(args, rv.Index(i).Interface()) - } - case reflect.Map: - for _, k := range rv.MapKeys() { - args = append(args, k.Interface(), rv.MapIndex(k).Interface()) - } - case reflect.Ptr: - if rv.Type().Elem().Kind() == reflect.Struct { - if !rv.IsNil() { - args = flattenStruct(args, rv.Elem()) - } - } else { - args = append(args, v) - } - default: - args = append(args, v) - } - return args -} - -func flattenStruct(args Args, v reflect.Value) Args { - ss := structSpecForType(v.Type()) - for _, fs := range ss.l { - fv := v.FieldByIndex(fs.index) - if fs.omitEmpty { - var empty = false - switch fv.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - empty = fv.Len() == 0 - case reflect.Bool: - empty = !fv.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - empty = fv.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - empty = fv.Uint() == 0 - case reflect.Float32, reflect.Float64: - empty = fv.Float() == 0 - case reflect.Interface, reflect.Ptr: - empty = fv.IsNil() - } - if empty { - continue - } - } - args = append(args, fs.name, fv.Interface()) - } - return args -} diff --git a/src/vendor/github.com/garyburd/redigo/redis/script.go b/src/vendor/github.com/garyburd/redigo/redis/script.go deleted file mode 100644 index 0ef1c821f..000000000 --- a/src/vendor/github.com/garyburd/redigo/redis/script.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2012 Gary Burd -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package redis - -import ( - "crypto/sha1" - "encoding/hex" - "io" - "strings" -) - -// Script encapsulates the source, hash and key count for a Lua script. See -// http://redis.io/commands/eval for information on scripts in Redis. -type Script struct { - keyCount int - src string - hash string -} - -// NewScript returns a new script object. If keyCount is greater than or equal -// to zero, then the count is automatically inserted in the EVAL command -// argument list. If keyCount is less than zero, then the application supplies -// the count as the first value in the keysAndArgs argument to the Do, Send and -// SendHash methods. 
-func NewScript(keyCount int, src string) *Script { - h := sha1.New() - io.WriteString(h, src) - return &Script{keyCount, src, hex.EncodeToString(h.Sum(nil))} -} - -func (s *Script) args(spec string, keysAndArgs []interface{}) []interface{} { - var args []interface{} - if s.keyCount < 0 { - args = make([]interface{}, 1+len(keysAndArgs)) - args[0] = spec - copy(args[1:], keysAndArgs) - } else { - args = make([]interface{}, 2+len(keysAndArgs)) - args[0] = spec - args[1] = s.keyCount - copy(args[2:], keysAndArgs) - } - return args -} - -// Hash returns the script hash. -func (s *Script) Hash() string { - return s.hash -} - -// Do evaluates the script. Under the covers, Do optimistically evaluates the -// script using the EVALSHA command. If the command fails because the script is -// not loaded, then Do evaluates the script using the EVAL command (thus -// causing the script to load). -func (s *Script) Do(c Conn, keysAndArgs ...interface{}) (interface{}, error) { - v, err := c.Do("EVALSHA", s.args(s.hash, keysAndArgs)...) - if e, ok := err.(Error); ok && strings.HasPrefix(string(e), "NOSCRIPT ") { - v, err = c.Do("EVAL", s.args(s.src, keysAndArgs)...) - } - return v, err -} - -// SendHash evaluates the script without waiting for the reply. The script is -// evaluated with the EVALSHA command. The application must ensure that the -// script is loaded by a previous call to Send, Do or Load methods. -func (s *Script) SendHash(c Conn, keysAndArgs ...interface{}) error { - return c.Send("EVALSHA", s.args(s.hash, keysAndArgs)...) -} - -// Send evaluates the script without waiting for the reply. -func (s *Script) Send(c Conn, keysAndArgs ...interface{}) error { - return c.Send("EVAL", s.args(s.src, keysAndArgs)...) -} - -// Load loads the script without evaluating it. -func (s *Script) Load(c Conn) error { - _, err := c.Do("SCRIPT", "LOAD", s.src) - return err -} diff --git a/src/vendor/github.com/go-redis/redis/.gitignore b/src/vendor/github.com/go-redis/redis/.gitignore new file mode 100644 index 000000000..ebfe903bc --- /dev/null +++ b/src/vendor/github.com/go-redis/redis/.gitignore @@ -0,0 +1,2 @@ +*.rdb +testdata/*/ diff --git a/src/vendor/github.com/go-redis/redis/.travis.yml b/src/vendor/github.com/go-redis/redis/.travis.yml new file mode 100644 index 000000000..632feca06 --- /dev/null +++ b/src/vendor/github.com/go-redis/redis/.travis.yml @@ -0,0 +1,21 @@ +sudo: false +language: go + +services: + - redis-server + +go: + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - 1.11.x + - tip + +matrix: + allow_failures: + - go: tip + +install: + - go get github.com/onsi/ginkgo + - go get github.com/onsi/gomega diff --git a/src/vendor/github.com/go-redis/redis/CHANGELOG.md b/src/vendor/github.com/go-redis/redis/CHANGELOG.md new file mode 100644 index 000000000..19645661a --- /dev/null +++ b/src/vendor/github.com/go-redis/redis/CHANGELOG.md @@ -0,0 +1,25 @@ +# Changelog + +## Unreleased + +- Cluster and Ring pipelines process commands for each node in its own goroutine. + +## 6.14 + +- Added Options.MinIdleConns. +- Added Options.MaxConnAge. +- PoolStats.FreeConns is renamed to PoolStats.IdleConns. +- Add Client.Do to simplify creating custom commands. +- Add Cmd.String, Cmd.Int, Cmd.Int64, Cmd.Uint64, Cmd.Float64, and Cmd.Bool helpers. +- Lower memory usage. + +## v6.13 + +- Ring got new options called `HashReplicas` and `Hash`. It is recommended to set `HashReplicas = 1000` for better keys distribution between shards. +- Cluster client was optimized to use much less memory when reloading cluster state. 
+- PubSub.ReceiveMessage is re-worked to not use ReceiveTimeout so it does not lose data when timeout occurres. In most cases it is recommended to use PubSub.Channel instead. +- Dialer.KeepAlive is set to 5 minutes by default. + +## v6.12 + +- ClusterClient got new option called `ClusterSlots` which allows to build cluster of normal Redis Servers that don't have cluster mode enabled. See https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup diff --git a/src/vendor/github.com/go-redis/redis/LICENSE b/src/vendor/github.com/go-redis/redis/LICENSE new file mode 100644 index 000000000..298bed9be --- /dev/null +++ b/src/vendor/github.com/go-redis/redis/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2013 The github.com/go-redis/redis Authors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/vendor/github.com/go-redis/redis/Makefile b/src/vendor/github.com/go-redis/redis/Makefile new file mode 100644 index 000000000..1fbdac91c --- /dev/null +++ b/src/vendor/github.com/go-redis/redis/Makefile @@ -0,0 +1,20 @@ +all: testdeps + go test ./... + go test ./... -short -race + env GOOS=linux GOARCH=386 go test ./... + go vet + +testdeps: testdata/redis/src/redis-server + +bench: testdeps + go test ./... -test.run=NONE -test.bench=. -test.benchmem + +.PHONY: all test testdeps bench + +testdata/redis: + mkdir -p $@ + wget -qO- https://github.com/antirez/redis/archive/unstable.tar.gz | tar xvz --strip-components=1 -C $@ + +testdata/redis/src/redis-server: testdata/redis + sed -i.bak 's/libjemalloc.a/libjemalloc.a -lrt/g' $ +} + +func ExampleClient() { + err := client.Set("key", "value", 0).Err() + if err != nil { + panic(err) + } + + val, err := client.Get("key").Result() + if err != nil { + panic(err) + } + fmt.Println("key", val) + + val2, err := client.Get("key2").Result() + if err == redis.Nil { + fmt.Println("key2 does not exist") + } else if err != nil { + panic(err) + } else { + fmt.Println("key2", val2) + } + // Output: key value + // key2 does not exist +} +``` + +## Howto + +Please go through [examples](https://godoc.org/github.com/go-redis/redis#pkg-examples) to get an idea how to use this package. 
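+
+For instance, the changelog above mentions a `Client.Do` helper for custom commands. The snippet below is only a minimal sketch of how it could be used (it reuses the `client` from the Quickstart above and assumes the v6 signature where `Do` returns a `*Cmd`, mirroring the `ClusterClient.Do` added later in this diff); check the package docs for the authoritative API:
+
+```go
+// Build a raw SET command and read the reply through the generic Cmd type.
+// "example-key" / "example-value" are placeholder names for illustration.
+v, err := client.Do("set", "example-key", "example-value").Result()
+if err != nil {
+	panic(err)
+}
+fmt.Println(v) // prints "OK" for a successful SET
+```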
+ +## Look and feel + +Some corner cases: + +```go +// SET key value EX 10 NX +set, err := client.SetNX("key", "value", 10*time.Second).Result() + +// SORT list LIMIT 0 2 ASC +vals, err := client.Sort("list", redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result() + +// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2 +vals, err := client.ZRangeByScoreWithScores("zset", redis.ZRangeBy{ + Min: "-inf", + Max: "+inf", + Offset: 0, + Count: 2, +}).Result() + +// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM +vals, err := client.ZInterStore("out", redis.ZStore{Weights: []int64{2, 3}}, "zset1", "zset2").Result() + +// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello" +vals, err := client.Eval("return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result() +``` + +## Benchmark + +go-redis vs redigo: + +``` +BenchmarkSetGoRedis10Conns64Bytes-4 200000 7621 ns/op 210 B/op 6 allocs/op +BenchmarkSetGoRedis100Conns64Bytes-4 200000 7554 ns/op 210 B/op 6 allocs/op +BenchmarkSetGoRedis10Conns1KB-4 200000 7697 ns/op 210 B/op 6 allocs/op +BenchmarkSetGoRedis100Conns1KB-4 200000 7688 ns/op 210 B/op 6 allocs/op +BenchmarkSetGoRedis10Conns10KB-4 200000 9214 ns/op 210 B/op 6 allocs/op +BenchmarkSetGoRedis100Conns10KB-4 200000 9181 ns/op 210 B/op 6 allocs/op +BenchmarkSetGoRedis10Conns1MB-4 2000 583242 ns/op 2337 B/op 6 allocs/op +BenchmarkSetGoRedis100Conns1MB-4 2000 583089 ns/op 2338 B/op 6 allocs/op +BenchmarkSetRedigo10Conns64Bytes-4 200000 7576 ns/op 208 B/op 7 allocs/op +BenchmarkSetRedigo100Conns64Bytes-4 200000 7782 ns/op 208 B/op 7 allocs/op +BenchmarkSetRedigo10Conns1KB-4 200000 7958 ns/op 208 B/op 7 allocs/op +BenchmarkSetRedigo100Conns1KB-4 200000 7725 ns/op 208 B/op 7 allocs/op +BenchmarkSetRedigo10Conns10KB-4 100000 18442 ns/op 208 B/op 7 allocs/op +BenchmarkSetRedigo100Conns10KB-4 100000 18818 ns/op 208 B/op 7 allocs/op +BenchmarkSetRedigo10Conns1MB-4 2000 668829 ns/op 226 B/op 7 allocs/op +BenchmarkSetRedigo100Conns1MB-4 2000 679542 ns/op 226 B/op 7 allocs/op +``` + +Redis Cluster: + +``` +BenchmarkRedisPing-4 200000 6983 ns/op 116 B/op 4 allocs/op +BenchmarkRedisClusterPing-4 100000 11535 ns/op 117 B/op 4 allocs/op +``` + +## See also + +- [Golang PostgreSQL ORM](https://github.com/go-pg/pg) +- [Golang msgpack](https://github.com/vmihailenco/msgpack) +- [Golang message task queue](https://github.com/go-msgqueue/msgqueue) diff --git a/src/vendor/github.com/go-redis/redis/cluster.go b/src/vendor/github.com/go-redis/redis/cluster.go new file mode 100644 index 000000000..55bc5bae7 --- /dev/null +++ b/src/vendor/github.com/go-redis/redis/cluster.go @@ -0,0 +1,1649 @@ +package redis + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "math" + "math/rand" + "net" + "runtime" + "sort" + "sync" + "sync/atomic" + "time" + + "github.com/go-redis/redis/internal" + "github.com/go-redis/redis/internal/hashtag" + "github.com/go-redis/redis/internal/pool" + "github.com/go-redis/redis/internal/proto" + "github.com/go-redis/redis/internal/singleflight" +) + +var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes") + +// ClusterOptions are used to configure a cluster client and should be +// passed to NewClusterClient. +type ClusterOptions struct { + // A seed list of host:port addresses of cluster nodes. + Addrs []string + + // The maximum number of retries before giving up. Command is retried + // on network errors and MOVED/ASK redirects. + // Default is 8 retries. + MaxRedirects int + + // Enables read-only commands on slave nodes. 
+ ReadOnly bool + // Allows routing read-only commands to the closest master or slave node. + // It automatically enables ReadOnly. + RouteByLatency bool + // Allows routing read-only commands to the random master or slave node. + // It automatically enables ReadOnly. + RouteRandomly bool + + // Optional function that returns cluster slots information. + // It is useful to manually create cluster of standalone Redis servers + // and load-balance read/write operations between master and slaves. + // It can use service like ZooKeeper to maintain configuration information + // and Cluster.ReloadState to manually trigger state reloading. + ClusterSlots func() ([]ClusterSlot, error) + + // Following options are copied from Options struct. + + OnConnect func(*Conn) error + + Password string + + MaxRetries int + MinRetryBackoff time.Duration + MaxRetryBackoff time.Duration + + DialTimeout time.Duration + ReadTimeout time.Duration + WriteTimeout time.Duration + + // PoolSize applies per cluster node and not for the whole cluster. + PoolSize int + MinIdleConns int + MaxConnAge time.Duration + PoolTimeout time.Duration + IdleTimeout time.Duration + IdleCheckFrequency time.Duration + + TLSConfig *tls.Config +} + +func (opt *ClusterOptions) init() { + if opt.MaxRedirects == -1 { + opt.MaxRedirects = 0 + } else if opt.MaxRedirects == 0 { + opt.MaxRedirects = 8 + } + + if (opt.RouteByLatency || opt.RouteRandomly) && opt.ClusterSlots == nil { + opt.ReadOnly = true + } + + if opt.PoolSize == 0 { + opt.PoolSize = 5 * runtime.NumCPU() + } + + switch opt.ReadTimeout { + case -1: + opt.ReadTimeout = 0 + case 0: + opt.ReadTimeout = 3 * time.Second + } + switch opt.WriteTimeout { + case -1: + opt.WriteTimeout = 0 + case 0: + opt.WriteTimeout = opt.ReadTimeout + } + + switch opt.MinRetryBackoff { + case -1: + opt.MinRetryBackoff = 0 + case 0: + opt.MinRetryBackoff = 8 * time.Millisecond + } + switch opt.MaxRetryBackoff { + case -1: + opt.MaxRetryBackoff = 0 + case 0: + opt.MaxRetryBackoff = 512 * time.Millisecond + } +} + +func (opt *ClusterOptions) clientOptions() *Options { + const disableIdleCheck = -1 + + return &Options{ + OnConnect: opt.OnConnect, + + MaxRetries: opt.MaxRetries, + MinRetryBackoff: opt.MinRetryBackoff, + MaxRetryBackoff: opt.MaxRetryBackoff, + Password: opt.Password, + readOnly: opt.ReadOnly, + + DialTimeout: opt.DialTimeout, + ReadTimeout: opt.ReadTimeout, + WriteTimeout: opt.WriteTimeout, + + PoolSize: opt.PoolSize, + MinIdleConns: opt.MinIdleConns, + MaxConnAge: opt.MaxConnAge, + PoolTimeout: opt.PoolTimeout, + IdleTimeout: opt.IdleTimeout, + IdleCheckFrequency: disableIdleCheck, + + TLSConfig: opt.TLSConfig, + } +} + +//------------------------------------------------------------------------------ + +type clusterNode struct { + Client *Client + + latency uint32 // atomic + generation uint32 // atomic + loading uint32 // atomic +} + +func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode { + opt := clOpt.clientOptions() + opt.Addr = addr + node := clusterNode{ + Client: NewClient(opt), + } + + node.latency = math.MaxUint32 + if clOpt.RouteByLatency { + go node.updateLatency() + } + + return &node +} + +func (n *clusterNode) String() string { + return n.Client.String() +} + +func (n *clusterNode) Close() error { + return n.Client.Close() +} + +func (n *clusterNode) updateLatency() { + const probes = 10 + + var latency uint32 + for i := 0; i < probes; i++ { + start := time.Now() + n.Client.Ping() + probe := uint32(time.Since(start) / time.Microsecond) + latency = (latency + 
probe) / 2 + } + atomic.StoreUint32(&n.latency, latency) +} + +func (n *clusterNode) Latency() time.Duration { + latency := atomic.LoadUint32(&n.latency) + return time.Duration(latency) * time.Microsecond +} + +func (n *clusterNode) MarkAsLoading() { + atomic.StoreUint32(&n.loading, uint32(time.Now().Unix())) +} + +func (n *clusterNode) Loading() bool { + const minute = int64(time.Minute / time.Second) + + loading := atomic.LoadUint32(&n.loading) + if loading == 0 { + return false + } + if time.Now().Unix()-int64(loading) < minute { + return true + } + atomic.StoreUint32(&n.loading, 0) + return false +} + +func (n *clusterNode) Generation() uint32 { + return atomic.LoadUint32(&n.generation) +} + +func (n *clusterNode) SetGeneration(gen uint32) { + for { + v := atomic.LoadUint32(&n.generation) + if gen < v || atomic.CompareAndSwapUint32(&n.generation, v, gen) { + break + } + } +} + +//------------------------------------------------------------------------------ + +type clusterNodes struct { + opt *ClusterOptions + + mu sync.RWMutex + allAddrs []string + allNodes map[string]*clusterNode + clusterAddrs []string + closed bool + + nodeCreateGroup singleflight.Group + + _generation uint32 // atomic +} + +func newClusterNodes(opt *ClusterOptions) *clusterNodes { + return &clusterNodes{ + opt: opt, + + allAddrs: opt.Addrs, + allNodes: make(map[string]*clusterNode), + } +} + +func (c *clusterNodes) Close() error { + c.mu.Lock() + defer c.mu.Unlock() + + if c.closed { + return nil + } + c.closed = true + + var firstErr error + for _, node := range c.allNodes { + if err := node.Client.Close(); err != nil && firstErr == nil { + firstErr = err + } + } + + c.allNodes = nil + c.clusterAddrs = nil + + return firstErr +} + +func (c *clusterNodes) Addrs() ([]string, error) { + var addrs []string + c.mu.RLock() + closed := c.closed + if !closed { + if len(c.clusterAddrs) > 0 { + addrs = c.clusterAddrs + } else { + addrs = c.allAddrs + } + } + c.mu.RUnlock() + + if closed { + return nil, pool.ErrClosed + } + if len(addrs) == 0 { + return nil, errClusterNoNodes + } + return addrs, nil +} + +func (c *clusterNodes) NextGeneration() uint32 { + return atomic.AddUint32(&c._generation, 1) +} + +// GC removes unused nodes. 
+func (c *clusterNodes) GC(generation uint32) { + var collected []*clusterNode + c.mu.Lock() + for addr, node := range c.allNodes { + if node.Generation() >= generation { + continue + } + + c.clusterAddrs = remove(c.clusterAddrs, addr) + delete(c.allNodes, addr) + collected = append(collected, node) + } + c.mu.Unlock() + + for _, node := range collected { + _ = node.Client.Close() + } +} + +func (c *clusterNodes) Get(addr string) (*clusterNode, error) { + var node *clusterNode + var err error + c.mu.RLock() + if c.closed { + err = pool.ErrClosed + } else { + node = c.allNodes[addr] + } + c.mu.RUnlock() + return node, err +} + +func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) { + node, err := c.Get(addr) + if err != nil { + return nil, err + } + if node != nil { + return node, nil + } + + v, err := c.nodeCreateGroup.Do(addr, func() (interface{}, error) { + node := newClusterNode(c.opt, addr) + return node, nil + }) + + c.mu.Lock() + defer c.mu.Unlock() + + if c.closed { + return nil, pool.ErrClosed + } + + node, ok := c.allNodes[addr] + if ok { + _ = v.(*clusterNode).Close() + return node, err + } + node = v.(*clusterNode) + + c.allAddrs = appendIfNotExists(c.allAddrs, addr) + if err == nil { + c.clusterAddrs = append(c.clusterAddrs, addr) + } + c.allNodes[addr] = node + + return node, err +} + +func (c *clusterNodes) All() ([]*clusterNode, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + if c.closed { + return nil, pool.ErrClosed + } + + cp := make([]*clusterNode, 0, len(c.allNodes)) + for _, node := range c.allNodes { + cp = append(cp, node) + } + return cp, nil +} + +func (c *clusterNodes) Random() (*clusterNode, error) { + addrs, err := c.Addrs() + if err != nil { + return nil, err + } + + n := rand.Intn(len(addrs)) + return c.GetOrCreate(addrs[n]) +} + +//------------------------------------------------------------------------------ + +type clusterSlot struct { + start, end int + nodes []*clusterNode +} + +type clusterSlotSlice []*clusterSlot + +func (p clusterSlotSlice) Len() int { + return len(p) +} + +func (p clusterSlotSlice) Less(i, j int) bool { + return p[i].start < p[j].start +} + +func (p clusterSlotSlice) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} + +type clusterState struct { + nodes *clusterNodes + Masters []*clusterNode + Slaves []*clusterNode + + slots []*clusterSlot + + generation uint32 + createdAt time.Time +} + +func newClusterState( + nodes *clusterNodes, slots []ClusterSlot, origin string, +) (*clusterState, error) { + c := clusterState{ + nodes: nodes, + + slots: make([]*clusterSlot, 0, len(slots)), + + generation: nodes.NextGeneration(), + createdAt: time.Now(), + } + + originHost, _, _ := net.SplitHostPort(origin) + isLoopbackOrigin := isLoopback(originHost) + + for _, slot := range slots { + var nodes []*clusterNode + for i, slotNode := range slot.Nodes { + addr := slotNode.Addr + if !isLoopbackOrigin { + addr = replaceLoopbackHost(addr, originHost) + } + + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + return nil, err + } + + node.SetGeneration(c.generation) + nodes = append(nodes, node) + + if i == 0 { + c.Masters = appendUniqueNode(c.Masters, node) + } else { + c.Slaves = appendUniqueNode(c.Slaves, node) + } + } + + c.slots = append(c.slots, &clusterSlot{ + start: slot.Start, + end: slot.End, + nodes: nodes, + }) + } + + sort.Sort(clusterSlotSlice(c.slots)) + + time.AfterFunc(time.Minute, func() { + nodes.GC(c.generation) + }) + + return &c, nil +} + +func replaceLoopbackHost(nodeAddr, originHost string) string { + 
nodeHost, nodePort, err := net.SplitHostPort(nodeAddr) + if err != nil { + return nodeAddr + } + + nodeIP := net.ParseIP(nodeHost) + if nodeIP == nil { + return nodeAddr + } + + if !nodeIP.IsLoopback() { + return nodeAddr + } + + // Use origin host which is not loopback and node port. + return net.JoinHostPort(originHost, nodePort) +} + +func isLoopback(host string) bool { + ip := net.ParseIP(host) + if ip == nil { + return true + } + return ip.IsLoopback() +} + +func (c *clusterState) slotMasterNode(slot int) (*clusterNode, error) { + nodes := c.slotNodes(slot) + if len(nodes) > 0 { + return nodes[0], nil + } + return c.nodes.Random() +} + +func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) { + nodes := c.slotNodes(slot) + switch len(nodes) { + case 0: + return c.nodes.Random() + case 1: + return nodes[0], nil + case 2: + if slave := nodes[1]; !slave.Loading() { + return slave, nil + } + return nodes[0], nil + default: + var slave *clusterNode + for i := 0; i < 10; i++ { + n := rand.Intn(len(nodes)-1) + 1 + slave = nodes[n] + if !slave.Loading() { + break + } + } + return slave, nil + } +} + +func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) { + const threshold = time.Millisecond + + nodes := c.slotNodes(slot) + if len(nodes) == 0 { + return c.nodes.Random() + } + + var node *clusterNode + for _, n := range nodes { + if n.Loading() { + continue + } + if node == nil || node.Latency()-n.Latency() > threshold { + node = n + } + } + return node, nil +} + +func (c *clusterState) slotRandomNode(slot int) *clusterNode { + nodes := c.slotNodes(slot) + n := rand.Intn(len(nodes)) + return nodes[n] +} + +func (c *clusterState) slotNodes(slot int) []*clusterNode { + i := sort.Search(len(c.slots), func(i int) bool { + return c.slots[i].end >= slot + }) + if i >= len(c.slots) { + return nil + } + x := c.slots[i] + if slot >= x.start && slot <= x.end { + return x.nodes + } + return nil +} + +func (c *clusterState) IsConsistent() bool { + if c.nodes.opt.ClusterSlots != nil { + return true + } + return len(c.Masters) <= len(c.Slaves) +} + +//------------------------------------------------------------------------------ + +type clusterStateHolder struct { + load func() (*clusterState, error) + + state atomic.Value + + firstErrMu sync.RWMutex + firstErr error + + reloading uint32 // atomic +} + +func newClusterStateHolder(fn func() (*clusterState, error)) *clusterStateHolder { + return &clusterStateHolder{ + load: fn, + } +} + +func (c *clusterStateHolder) Reload() (*clusterState, error) { + state, err := c.reload() + if err != nil { + return nil, err + } + if !state.IsConsistent() { + time.AfterFunc(time.Second, c.LazyReload) + } + return state, nil +} + +func (c *clusterStateHolder) reload() (*clusterState, error) { + state, err := c.load() + if err != nil { + c.firstErrMu.Lock() + if c.firstErr == nil { + c.firstErr = err + } + c.firstErrMu.Unlock() + return nil, err + } + c.state.Store(state) + return state, nil +} + +func (c *clusterStateHolder) LazyReload() { + if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) { + return + } + go func() { + defer atomic.StoreUint32(&c.reloading, 0) + + for { + state, err := c.reload() + if err != nil { + return + } + time.Sleep(100 * time.Millisecond) + if state.IsConsistent() { + return + } + } + }() +} + +func (c *clusterStateHolder) Get() (*clusterState, error) { + v := c.state.Load() + if v != nil { + state := v.(*clusterState) + if time.Since(state.createdAt) > time.Minute { + c.LazyReload() + } + return state, nil + } + + 
c.firstErrMu.RLock() + err := c.firstErr + c.firstErrMu.RUnlock() + if err != nil { + return nil, err + } + + return nil, errors.New("redis: cluster has no state") +} + +func (c *clusterStateHolder) ReloadOrGet() (*clusterState, error) { + state, err := c.Reload() + if err == nil { + return state, nil + } + return c.Get() +} + +//------------------------------------------------------------------------------ + +// ClusterClient is a Redis Cluster client representing a pool of zero +// or more underlying connections. It's safe for concurrent use by +// multiple goroutines. +type ClusterClient struct { + cmdable + + ctx context.Context + + opt *ClusterOptions + nodes *clusterNodes + state *clusterStateHolder + cmdsInfoCache *cmdsInfoCache + + process func(Cmder) error + processPipeline func([]Cmder) error + processTxPipeline func([]Cmder) error +} + +// NewClusterClient returns a Redis Cluster client as described in +// http://redis.io/topics/cluster-spec. +func NewClusterClient(opt *ClusterOptions) *ClusterClient { + opt.init() + + c := &ClusterClient{ + opt: opt, + nodes: newClusterNodes(opt), + } + c.state = newClusterStateHolder(c.loadState) + c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo) + + c.process = c.defaultProcess + c.processPipeline = c.defaultProcessPipeline + c.processTxPipeline = c.defaultProcessTxPipeline + + c.init() + + _, _ = c.state.Reload() + _, _ = c.cmdsInfoCache.Get() + + if opt.IdleCheckFrequency > 0 { + go c.reaper(opt.IdleCheckFrequency) + } + + return c +} + +// ReloadState reloads cluster state. It calls ClusterSlots func +// to get cluster slots information. +func (c *ClusterClient) ReloadState() error { + _, err := c.state.Reload() + return err +} + +func (c *ClusterClient) init() { + c.cmdable.setProcessor(c.Process) +} + +func (c *ClusterClient) Context() context.Context { + if c.ctx != nil { + return c.ctx + } + return context.Background() +} + +func (c *ClusterClient) WithContext(ctx context.Context) *ClusterClient { + if ctx == nil { + panic("nil context") + } + c2 := c.copy() + c2.ctx = ctx + return c2 +} + +func (c *ClusterClient) copy() *ClusterClient { + cp := *c + cp.init() + return &cp +} + +// Options returns read-only Options that were used to create the client. 
+func (c *ClusterClient) Options() *ClusterOptions { + return c.opt +} + +func (c *ClusterClient) retryBackoff(attempt int) time.Duration { + return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff) +} + +func (c *ClusterClient) cmdsInfo() (map[string]*CommandInfo, error) { + addrs, err := c.nodes.Addrs() + if err != nil { + return nil, err + } + + var firstErr error + for _, addr := range addrs { + node, err := c.nodes.Get(addr) + if err != nil { + return nil, err + } + if node == nil { + continue + } + + info, err := node.Client.Command().Result() + if err == nil { + return info, nil + } + if firstErr == nil { + firstErr = err + } + } + return nil, firstErr +} + +func (c *ClusterClient) cmdInfo(name string) *CommandInfo { + cmdsInfo, err := c.cmdsInfoCache.Get() + if err != nil { + return nil + } + + info := cmdsInfo[name] + if info == nil { + internal.Logf("info for cmd=%s not found", name) + } + return info +} + +func cmdSlot(cmd Cmder, pos int) int { + if pos == 0 { + return hashtag.RandomSlot() + } + firstKey := cmd.stringArg(pos) + return hashtag.Slot(firstKey) +} + +func (c *ClusterClient) cmdSlot(cmd Cmder) int { + cmdInfo := c.cmdInfo(cmd.Name()) + return cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo)) +} + +func (c *ClusterClient) cmdSlotAndNode(cmd Cmder) (int, *clusterNode, error) { + state, err := c.state.Get() + if err != nil { + return 0, nil, err + } + + cmdInfo := c.cmdInfo(cmd.Name()) + slot := cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo)) + + if c.opt.ReadOnly && cmdInfo != nil && cmdInfo.ReadOnly { + if c.opt.RouteByLatency { + node, err := state.slotClosestNode(slot) + return slot, node, err + } + + if c.opt.RouteRandomly { + node := state.slotRandomNode(slot) + return slot, node, nil + } + + node, err := state.slotSlaveNode(slot) + return slot, node, err + } + + node, err := state.slotMasterNode(slot) + return slot, node, err +} + +func (c *ClusterClient) slotMasterNode(slot int) (*clusterNode, error) { + state, err := c.state.Get() + if err != nil { + return nil, err + } + + nodes := state.slotNodes(slot) + if len(nodes) > 0 { + return nodes[0], nil + } + return c.nodes.Random() +} + +func (c *ClusterClient) Watch(fn func(*Tx) error, keys ...string) error { + if len(keys) == 0 { + return fmt.Errorf("redis: Watch requires at least one key") + } + + slot := hashtag.Slot(keys[0]) + for _, key := range keys[1:] { + if hashtag.Slot(key) != slot { + err := fmt.Errorf("redis: Watch requires all keys to be in the same slot") + return err + } + } + + node, err := c.slotMasterNode(slot) + if err != nil { + return err + } + + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + time.Sleep(c.retryBackoff(attempt)) + } + + err = node.Client.Watch(fn, keys...) + if err == nil { + break + } + + if internal.IsRetryableError(err, true) { + c.state.LazyReload() + continue + } + + moved, ask, addr := internal.IsMovedError(err) + if moved || ask { + c.state.LazyReload() + node, err = c.nodes.GetOrCreate(addr) + if err != nil { + return err + } + continue + } + + if err == pool.ErrClosed { + node, err = c.slotMasterNode(slot) + if err != nil { + return err + } + continue + } + + return err + } + + return err +} + +// Close closes the cluster client, releasing any open resources. +// +// It is rare to Close a ClusterClient, as the ClusterClient is meant +// to be long-lived and shared between many goroutines. +func (c *ClusterClient) Close() error { + return c.nodes.Close() +} + +// Do creates a Cmd from the args and processes the cmd. 
+func (c *ClusterClient) Do(args ...interface{}) *Cmd { + cmd := NewCmd(args...) + c.Process(cmd) + return cmd +} + +func (c *ClusterClient) WrapProcess( + fn func(oldProcess func(Cmder) error) func(Cmder) error, +) { + c.process = fn(c.process) +} + +func (c *ClusterClient) Process(cmd Cmder) error { + return c.process(cmd) +} + +func (c *ClusterClient) defaultProcess(cmd Cmder) error { + var node *clusterNode + var ask bool + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + time.Sleep(c.retryBackoff(attempt)) + } + + if node == nil { + var err error + _, node, err = c.cmdSlotAndNode(cmd) + if err != nil { + cmd.setErr(err) + break + } + } + + var err error + if ask { + pipe := node.Client.Pipeline() + _ = pipe.Process(NewCmd("ASKING")) + _ = pipe.Process(cmd) + _, err = pipe.Exec() + _ = pipe.Close() + ask = false + } else { + err = node.Client.Process(cmd) + } + + // If there is no error - we are done. + if err == nil { + break + } + + // If slave is loading - read from master. + if c.opt.ReadOnly && internal.IsLoadingError(err) { + node.MarkAsLoading() + continue + } + + if internal.IsRetryableError(err, true) { + c.state.LazyReload() + + // First retry the same node. + if attempt == 0 { + continue + } + + // Second try random node. + node, err = c.nodes.Random() + if err != nil { + break + } + continue + } + + var moved bool + var addr string + moved, ask, addr = internal.IsMovedError(err) + if moved || ask { + c.state.LazyReload() + + node, err = c.nodes.GetOrCreate(addr) + if err != nil { + break + } + continue + } + + if err == pool.ErrClosed { + node = nil + continue + } + + break + } + + return cmd.Err() +} + +// ForEachMaster concurrently calls the fn on each master node in the cluster. +// It returns the first error if any. +func (c *ClusterClient) ForEachMaster(fn func(client *Client) error) error { + state, err := c.state.ReloadOrGet() + if err != nil { + return err + } + + var wg sync.WaitGroup + errCh := make(chan error, 1) + for _, master := range state.Masters { + wg.Add(1) + go func(node *clusterNode) { + defer wg.Done() + err := fn(node.Client) + if err != nil { + select { + case errCh <- err: + default: + } + } + }(master) + } + wg.Wait() + + select { + case err := <-errCh: + return err + default: + return nil + } +} + +// ForEachSlave concurrently calls the fn on each slave node in the cluster. +// It returns the first error if any. +func (c *ClusterClient) ForEachSlave(fn func(client *Client) error) error { + state, err := c.state.ReloadOrGet() + if err != nil { + return err + } + + var wg sync.WaitGroup + errCh := make(chan error, 1) + for _, slave := range state.Slaves { + wg.Add(1) + go func(node *clusterNode) { + defer wg.Done() + err := fn(node.Client) + if err != nil { + select { + case errCh <- err: + default: + } + } + }(slave) + } + wg.Wait() + + select { + case err := <-errCh: + return err + default: + return nil + } +} + +// ForEachNode concurrently calls the fn on each known node in the cluster. +// It returns the first error if any. 
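+// fn may be invoked from multiple goroutines at once (one per node), so it
+// must be safe for concurrent use.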
+func (c *ClusterClient) ForEachNode(fn func(client *Client) error) error { + state, err := c.state.ReloadOrGet() + if err != nil { + return err + } + + var wg sync.WaitGroup + errCh := make(chan error, 1) + worker := func(node *clusterNode) { + defer wg.Done() + err := fn(node.Client) + if err != nil { + select { + case errCh <- err: + default: + } + } + } + + for _, node := range state.Masters { + wg.Add(1) + go worker(node) + } + for _, node := range state.Slaves { + wg.Add(1) + go worker(node) + } + + wg.Wait() + select { + case err := <-errCh: + return err + default: + return nil + } +} + +// PoolStats returns accumulated connection pool stats. +func (c *ClusterClient) PoolStats() *PoolStats { + var acc PoolStats + + state, _ := c.state.Get() + if state == nil { + return &acc + } + + for _, node := range state.Masters { + s := node.Client.connPool.Stats() + acc.Hits += s.Hits + acc.Misses += s.Misses + acc.Timeouts += s.Timeouts + + acc.TotalConns += s.TotalConns + acc.IdleConns += s.IdleConns + acc.StaleConns += s.StaleConns + } + + for _, node := range state.Slaves { + s := node.Client.connPool.Stats() + acc.Hits += s.Hits + acc.Misses += s.Misses + acc.Timeouts += s.Timeouts + + acc.TotalConns += s.TotalConns + acc.IdleConns += s.IdleConns + acc.StaleConns += s.StaleConns + } + + return &acc +} + +func (c *ClusterClient) loadState() (*clusterState, error) { + if c.opt.ClusterSlots != nil { + slots, err := c.opt.ClusterSlots() + if err != nil { + return nil, err + } + return newClusterState(c.nodes, slots, "") + } + + addrs, err := c.nodes.Addrs() + if err != nil { + return nil, err + } + + var firstErr error + for _, addr := range addrs { + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + if firstErr == nil { + firstErr = err + } + continue + } + + slots, err := node.Client.ClusterSlots().Result() + if err != nil { + if firstErr == nil { + firstErr = err + } + continue + } + + return newClusterState(c.nodes, slots, node.Client.opt.Addr) + } + + return nil, firstErr +} + +// reaper closes idle connections to the cluster. 
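+// It is started from NewClusterClient in its own goroutine when
+// IdleCheckFrequency > 0 and reaps stale connections on every tick.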
+func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) { + ticker := time.NewTicker(idleCheckFrequency) + defer ticker.Stop() + + for range ticker.C { + nodes, err := c.nodes.All() + if err != nil { + break + } + + for _, node := range nodes { + _, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns() + if err != nil { + internal.Logf("ReapStaleConns failed: %s", err) + } + } + } +} + +func (c *ClusterClient) Pipeline() Pipeliner { + pipe := Pipeline{ + exec: c.processPipeline, + } + pipe.statefulCmdable.setProcessor(pipe.Process) + return &pipe +} + +func (c *ClusterClient) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) { + return c.Pipeline().Pipelined(fn) +} + +func (c *ClusterClient) WrapProcessPipeline( + fn func(oldProcess func([]Cmder) error) func([]Cmder) error, +) { + c.processPipeline = fn(c.processPipeline) +} + +func (c *ClusterClient) defaultProcessPipeline(cmds []Cmder) error { + cmdsMap := newCmdsMap() + err := c.mapCmdsByNode(cmds, cmdsMap) + if err != nil { + setCmdsErr(cmds, err) + return err + } + + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + time.Sleep(c.retryBackoff(attempt)) + } + + failedCmds := newCmdsMap() + var wg sync.WaitGroup + + for node, cmds := range cmdsMap.m { + wg.Add(1) + go func(node *clusterNode, cmds []Cmder) { + defer wg.Done() + + cn, err := node.Client.getConn() + if err != nil { + if err == pool.ErrClosed { + c.mapCmdsByNode(cmds, failedCmds) + } else { + setCmdsErr(cmds, err) + } + return + } + + err = c.pipelineProcessCmds(node, cn, cmds, failedCmds) + node.Client.releaseConnStrict(cn, err) + }(node, cmds) + } + + wg.Wait() + if len(failedCmds.m) == 0 { + break + } + cmdsMap = failedCmds + } + + return cmdsFirstErr(cmds) +} + +type cmdsMap struct { + mu sync.Mutex + m map[*clusterNode][]Cmder +} + +func newCmdsMap() *cmdsMap { + return &cmdsMap{ + m: make(map[*clusterNode][]Cmder), + } +} + +func (c *ClusterClient) mapCmdsByNode(cmds []Cmder, cmdsMap *cmdsMap) error { + state, err := c.state.Get() + if err != nil { + setCmdsErr(cmds, err) + return err + } + + cmdsAreReadOnly := c.cmdsAreReadOnly(cmds) + for _, cmd := range cmds { + var node *clusterNode + var err error + if cmdsAreReadOnly { + _, node, err = c.cmdSlotAndNode(cmd) + } else { + slot := c.cmdSlot(cmd) + node, err = state.slotMasterNode(slot) + } + if err != nil { + return err + } + cmdsMap.mu.Lock() + cmdsMap.m[node] = append(cmdsMap.m[node], cmd) + cmdsMap.mu.Unlock() + } + return nil +} + +func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool { + for _, cmd := range cmds { + cmdInfo := c.cmdInfo(cmd.Name()) + if cmdInfo == nil || !cmdInfo.ReadOnly { + return false + } + } + return true +} + +func (c *ClusterClient) pipelineProcessCmds( + node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap, +) error { + err := cn.WithWriter(c.opt.WriteTimeout, func(wr *proto.Writer) error { + return writeCmd(wr, cmds...) 
+ }) + if err != nil { + setCmdsErr(cmds, err) + failedCmds.mu.Lock() + failedCmds.m[node] = cmds + failedCmds.mu.Unlock() + return err + } + + err = cn.WithReader(c.opt.ReadTimeout, func(rd *proto.Reader) error { + return c.pipelineReadCmds(rd, cmds, failedCmds) + }) + return err +} + +func (c *ClusterClient) pipelineReadCmds( + rd *proto.Reader, cmds []Cmder, failedCmds *cmdsMap, +) error { + for _, cmd := range cmds { + err := cmd.readReply(rd) + if err == nil { + continue + } + + if c.checkMovedErr(cmd, err, failedCmds) { + continue + } + + if internal.IsRedisError(err) { + continue + } + + return err + } + return nil +} + +func (c *ClusterClient) checkMovedErr( + cmd Cmder, err error, failedCmds *cmdsMap, +) bool { + moved, ask, addr := internal.IsMovedError(err) + + if moved { + c.state.LazyReload() + + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + return false + } + + failedCmds.mu.Lock() + failedCmds.m[node] = append(failedCmds.m[node], cmd) + failedCmds.mu.Unlock() + return true + } + + if ask { + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + return false + } + + failedCmds.mu.Lock() + failedCmds.m[node] = append(failedCmds.m[node], NewCmd("ASKING"), cmd) + failedCmds.mu.Unlock() + return true + } + + return false +} + +// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC. +func (c *ClusterClient) TxPipeline() Pipeliner { + pipe := Pipeline{ + exec: c.processTxPipeline, + } + pipe.statefulCmdable.setProcessor(pipe.Process) + return &pipe +} + +func (c *ClusterClient) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) { + return c.TxPipeline().Pipelined(fn) +} + +func (c *ClusterClient) defaultProcessTxPipeline(cmds []Cmder) error { + state, err := c.state.Get() + if err != nil { + return err + } + + cmdsMap := c.mapCmdsBySlot(cmds) + for slot, cmds := range cmdsMap { + node, err := state.slotMasterNode(slot) + if err != nil { + setCmdsErr(cmds, err) + continue + } + cmdsMap := map[*clusterNode][]Cmder{node: cmds} + + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + time.Sleep(c.retryBackoff(attempt)) + } + + failedCmds := newCmdsMap() + var wg sync.WaitGroup + + for node, cmds := range cmdsMap { + wg.Add(1) + go func(node *clusterNode, cmds []Cmder) { + defer wg.Done() + + cn, err := node.Client.getConn() + if err != nil { + if err == pool.ErrClosed { + c.mapCmdsByNode(cmds, failedCmds) + } else { + setCmdsErr(cmds, err) + } + return + } + + err = c.txPipelineProcessCmds(node, cn, cmds, failedCmds) + node.Client.releaseConnStrict(cn, err) + }(node, cmds) + } + + wg.Wait() + if len(failedCmds.m) == 0 { + break + } + cmdsMap = failedCmds.m + } + } + + return cmdsFirstErr(cmds) +} + +func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder { + cmdsMap := make(map[int][]Cmder) + for _, cmd := range cmds { + slot := c.cmdSlot(cmd) + cmdsMap[slot] = append(cmdsMap[slot], cmd) + } + return cmdsMap +} + +func (c *ClusterClient) txPipelineProcessCmds( + node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap, +) error { + err := cn.WithWriter(c.opt.WriteTimeout, func(wr *proto.Writer) error { + return txPipelineWriteMulti(wr, cmds) + }) + if err != nil { + setCmdsErr(cmds, err) + failedCmds.mu.Lock() + failedCmds.m[node] = cmds + failedCmds.mu.Unlock() + return err + } + + err = cn.WithReader(c.opt.ReadTimeout, func(rd *proto.Reader) error { + err := c.txPipelineReadQueued(rd, cmds, failedCmds) + if err != nil { + setCmdsErr(cmds, err) + return err + } + return pipelineReadCmds(rd, 
cmds) + }) + return err +} + +func (c *ClusterClient) txPipelineReadQueued( + rd *proto.Reader, cmds []Cmder, failedCmds *cmdsMap, +) error { + // Parse queued replies. + var statusCmd StatusCmd + if err := statusCmd.readReply(rd); err != nil { + return err + } + + for _, cmd := range cmds { + err := statusCmd.readReply(rd) + if err == nil { + continue + } + + if c.checkMovedErr(cmd, err, failedCmds) || internal.IsRedisError(err) { + continue + } + + return err + } + + // Parse number of replies. + line, err := rd.ReadLine() + if err != nil { + if err == Nil { + err = TxFailedErr + } + return err + } + + switch line[0] { + case proto.ErrorReply: + err := proto.ParseErrorReply(line) + for _, cmd := range cmds { + if !c.checkMovedErr(cmd, err, failedCmds) { + break + } + } + return err + case proto.ArrayReply: + // ok + default: + err := fmt.Errorf("redis: expected '*', but got line %q", line) + return err + } + + return nil +} + +func (c *ClusterClient) pubSub(channels []string) *PubSub { + var node *clusterNode + pubsub := &PubSub{ + opt: c.opt.clientOptions(), + + newConn: func(channels []string) (*pool.Conn, error) { + if node == nil { + var slot int + if len(channels) > 0 { + slot = hashtag.Slot(channels[0]) + } else { + slot = -1 + } + + masterNode, err := c.slotMasterNode(slot) + if err != nil { + return nil, err + } + node = masterNode + } + return node.Client.newConn() + }, + closeConn: func(cn *pool.Conn) error { + return node.Client.connPool.CloseConn(cn) + }, + } + pubsub.init() + return pubsub +} + +// Subscribe subscribes the client to the specified channels. +// Channels can be omitted to create empty subscription. +func (c *ClusterClient) Subscribe(channels ...string) *PubSub { + pubsub := c.pubSub(channels) + if len(channels) > 0 { + _ = pubsub.Subscribe(channels...) + } + return pubsub +} + +// PSubscribe subscribes the client to the given patterns. +// Patterns can be omitted to create empty subscription. +func (c *ClusterClient) PSubscribe(channels ...string) *PubSub { + pubsub := c.pubSub(channels) + if len(channels) > 0 { + _ = pubsub.PSubscribe(channels...) + } + return pubsub +} + +func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode { + for _, n := range nodes { + if n == node { + return nodes + } + } + return append(nodes, node) +} + +func appendIfNotExists(ss []string, es ...string) []string { +loop: + for _, e := range es { + for _, s := range ss { + if s == e { + continue loop + } + } + ss = append(ss, e) + } + return ss +} + +func remove(ss []string, es ...string) []string { + if len(es) == 0 { + return ss[:0] + } + for _, e := range es { + for i, s := range ss { + if s == e { + ss = append(ss[:i], ss[i+1:]...) 
+ break + } + } + } + return ss +} diff --git a/src/vendor/github.com/go-redis/redis/cluster_commands.go b/src/vendor/github.com/go-redis/redis/cluster_commands.go new file mode 100644 index 000000000..dff62c902 --- /dev/null +++ b/src/vendor/github.com/go-redis/redis/cluster_commands.go @@ -0,0 +1,22 @@ +package redis + +import "sync/atomic" + +func (c *ClusterClient) DBSize() *IntCmd { + cmd := NewIntCmd("dbsize") + var size int64 + err := c.ForEachMaster(func(master *Client) error { + n, err := master.DBSize().Result() + if err != nil { + return err + } + atomic.AddInt64(&size, n) + return nil + }) + if err != nil { + cmd.setErr(err) + return cmd + } + cmd.val = size + return cmd +} diff --git a/src/vendor/github.com/go-redis/redis/command.go b/src/vendor/github.com/go-redis/redis/command.go new file mode 100644 index 000000000..05dd6755a --- /dev/null +++ b/src/vendor/github.com/go-redis/redis/command.go @@ -0,0 +1,1874 @@ +package redis + +import ( + "fmt" + "net" + "strconv" + "strings" + "time" + + "github.com/go-redis/redis/internal" + "github.com/go-redis/redis/internal/proto" +) + +type Cmder interface { + Name() string + Args() []interface{} + stringArg(int) string + + readReply(rd *proto.Reader) error + setErr(error) + + readTimeout() *time.Duration + + Err() error +} + +func setCmdsErr(cmds []Cmder, e error) { + for _, cmd := range cmds { + if cmd.Err() == nil { + cmd.setErr(e) + } + } +} + +func cmdsFirstErr(cmds []Cmder) error { + for _, cmd := range cmds { + if err := cmd.Err(); err != nil { + return err + } + } + return nil +} + +func writeCmd(wr *proto.Writer, cmds ...Cmder) error { + for _, cmd := range cmds { + err := wr.WriteArgs(cmd.Args()) + if err != nil { + return err + } + } + return nil +} + +func cmdString(cmd Cmder, val interface{}) string { + var ss []string + for _, arg := range cmd.Args() { + ss = append(ss, fmt.Sprint(arg)) + } + s := strings.Join(ss, " ") + if err := cmd.Err(); err != nil { + return s + ": " + err.Error() + } + if val != nil { + switch vv := val.(type) { + case []byte: + return s + ": " + string(vv) + default: + return s + ": " + fmt.Sprint(val) + } + } + return s + +} + +func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int { + switch cmd.Name() { + case "eval", "evalsha": + if cmd.stringArg(2) != "0" { + return 3 + } + + return 0 + case "publish": + return 1 + } + if info == nil { + return 0 + } + return int(info.FirstKeyPos) +} + +//------------------------------------------------------------------------------ + +type baseCmd struct { + _args []interface{} + err error + + _readTimeout *time.Duration +} + +var _ Cmder = (*Cmd)(nil) + +func (cmd *baseCmd) Err() error { + return cmd.err +} + +func (cmd *baseCmd) Args() []interface{} { + return cmd._args +} + +func (cmd *baseCmd) stringArg(pos int) string { + if pos < 0 || pos >= len(cmd._args) { + return "" + } + s, _ := cmd._args[pos].(string) + return s +} + +func (cmd *baseCmd) Name() string { + if len(cmd._args) > 0 { + // Cmd name must be lower cased. 
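+ // The lowered name is written back into _args so later lookups reuse it.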
+ s := internal.ToLower(cmd.stringArg(0)) + cmd._args[0] = s + return s + } + return "" +} + +func (cmd *baseCmd) readTimeout() *time.Duration { + return cmd._readTimeout +} + +func (cmd *baseCmd) setReadTimeout(d time.Duration) { + cmd._readTimeout = &d +} + +func (cmd *baseCmd) setErr(e error) { + cmd.err = e +} + +//------------------------------------------------------------------------------ + +type Cmd struct { + baseCmd + + val interface{} +} + +func NewCmd(args ...interface{}) *Cmd { + return &Cmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *Cmd) Val() interface{} { + return cmd.val +} + +func (cmd *Cmd) Result() (interface{}, error) { + return cmd.val, cmd.err +} + +func (cmd *Cmd) String() (string, error) { + if cmd.err != nil { + return "", cmd.err + } + switch val := cmd.val.(type) { + case string: + return val, nil + default: + err := fmt.Errorf("redis: unexpected type=%T for String", val) + return "", err + } +} + +func (cmd *Cmd) Int() (int, error) { + if cmd.err != nil { + return 0, cmd.err + } + switch val := cmd.val.(type) { + case int64: + return int(val), nil + case string: + return strconv.Atoi(val) + default: + err := fmt.Errorf("redis: unexpected type=%T for Int", val) + return 0, err + } +} + +func (cmd *Cmd) Int64() (int64, error) { + if cmd.err != nil { + return 0, cmd.err + } + switch val := cmd.val.(type) { + case int64: + return val, nil + case string: + return strconv.ParseInt(val, 10, 64) + default: + err := fmt.Errorf("redis: unexpected type=%T for Int64", val) + return 0, err + } +} + +func (cmd *Cmd) Uint64() (uint64, error) { + if cmd.err != nil { + return 0, cmd.err + } + switch val := cmd.val.(type) { + case int64: + return uint64(val), nil + case string: + return strconv.ParseUint(val, 10, 64) + default: + err := fmt.Errorf("redis: unexpected type=%T for Uint64", val) + return 0, err + } +} + +func (cmd *Cmd) Float64() (float64, error) { + if cmd.err != nil { + return 0, cmd.err + } + switch val := cmd.val.(type) { + case int64: + return float64(val), nil + case string: + return strconv.ParseFloat(val, 64) + default: + err := fmt.Errorf("redis: unexpected type=%T for Float64", val) + return 0, err + } +} + +func (cmd *Cmd) Bool() (bool, error) { + if cmd.err != nil { + return false, cmd.err + } + switch val := cmd.val.(type) { + case int64: + return val != 0, nil + case string: + return strconv.ParseBool(val) + default: + err := fmt.Errorf("redis: unexpected type=%T for Bool", val) + return false, err + } +} + +func (cmd *Cmd) readReply(rd *proto.Reader) error { + cmd.val, cmd.err = rd.ReadReply(sliceParser) + return cmd.err +} + +// Implements proto.MultiBulkParse +func sliceParser(rd *proto.Reader, n int64) (interface{}, error) { + vals := make([]interface{}, 0, n) + for i := int64(0); i < n; i++ { + v, err := rd.ReadReply(sliceParser) + if err != nil { + if err == Nil { + vals = append(vals, nil) + continue + } + if err, ok := err.(proto.RedisError); ok { + vals = append(vals, err) + continue + } + return nil, err + } + + switch v := v.(type) { + case string: + vals = append(vals, v) + default: + vals = append(vals, v) + } + } + return vals, nil +} + +//------------------------------------------------------------------------------ + +type SliceCmd struct { + baseCmd + + val []interface{} +} + +var _ Cmder = (*SliceCmd)(nil) + +func NewSliceCmd(args ...interface{}) *SliceCmd { + return &SliceCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *SliceCmd) Val() []interface{} { + return cmd.val +} + +func (cmd *SliceCmd) Result() 
([]interface{}, error) { + return cmd.val, cmd.err +} + +func (cmd *SliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *SliceCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(sliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]interface{}) + return nil +} + +//------------------------------------------------------------------------------ + +type StatusCmd struct { + baseCmd + + val string +} + +var _ Cmder = (*StatusCmd)(nil) + +func NewStatusCmd(args ...interface{}) *StatusCmd { + return &StatusCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *StatusCmd) Val() string { + return cmd.val +} + +func (cmd *StatusCmd) Result() (string, error) { + return cmd.val, cmd.err +} + +func (cmd *StatusCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StatusCmd) readReply(rd *proto.Reader) error { + cmd.val, cmd.err = rd.ReadString() + return cmd.err +} + +//------------------------------------------------------------------------------ + +type IntCmd struct { + baseCmd + + val int64 +} + +var _ Cmder = (*IntCmd)(nil) + +func NewIntCmd(args ...interface{}) *IntCmd { + return &IntCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *IntCmd) Val() int64 { + return cmd.val +} + +func (cmd *IntCmd) Result() (int64, error) { + return cmd.val, cmd.err +} + +func (cmd *IntCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *IntCmd) readReply(rd *proto.Reader) error { + cmd.val, cmd.err = rd.ReadIntReply() + return cmd.err +} + +//------------------------------------------------------------------------------ + +type DurationCmd struct { + baseCmd + + val time.Duration + precision time.Duration +} + +var _ Cmder = (*DurationCmd)(nil) + +func NewDurationCmd(precision time.Duration, args ...interface{}) *DurationCmd { + return &DurationCmd{ + baseCmd: baseCmd{_args: args}, + precision: precision, + } +} + +func (cmd *DurationCmd) Val() time.Duration { + return cmd.val +} + +func (cmd *DurationCmd) Result() (time.Duration, error) { + return cmd.val, cmd.err +} + +func (cmd *DurationCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *DurationCmd) readReply(rd *proto.Reader) error { + var n int64 + n, cmd.err = rd.ReadIntReply() + if cmd.err != nil { + return cmd.err + } + cmd.val = time.Duration(n) * cmd.precision + return nil +} + +//------------------------------------------------------------------------------ + +type TimeCmd struct { + baseCmd + + val time.Time +} + +var _ Cmder = (*TimeCmd)(nil) + +func NewTimeCmd(args ...interface{}) *TimeCmd { + return &TimeCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *TimeCmd) Val() time.Time { + return cmd.val +} + +func (cmd *TimeCmd) Result() (time.Time, error) { + return cmd.val, cmd.err +} + +func (cmd *TimeCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *TimeCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(timeParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.(time.Time) + return nil +} + +// Implements proto.MultiBulkParse +func timeParser(rd *proto.Reader, n int64) (interface{}, error) { + if n != 2 { + return nil, fmt.Errorf("got %d elements, expected 2", n) + } + + sec, err := rd.ReadInt() + if err != nil { + return nil, err + } + + microsec, err := rd.ReadInt() + if err != nil { + return nil, err + } + + return time.Unix(sec, microsec*1000), nil +} + 
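The command types above are consumed through the cluster client from cluster.go. A minimal sketch of typical usage, assuming the upstream ClusterOptions.Addrs field for the seed node addresses (the options struct is defined outside this hunk):

package main

import (
	"fmt"

	"github.com/go-redis/redis"
)

func main() {
	// Addrs is assumed from the upstream ClusterOptions definition; it lists
	// the seed nodes used to discover the rest of the cluster.
	c := redis.NewClusterClient(&redis.ClusterOptions{
		Addrs: []string{"127.0.0.1:7000", "127.0.0.1:7001"},
	})
	defer c.Close()

	// Do builds a generic Cmd and routes it by the slot of its first key.
	if _, err := c.Do("set", "greeting", "hello").Result(); err != nil {
		fmt.Println("set failed:", err)
		return
	}

	// ForEachMaster calls the callback concurrently on every master and
	// returns the first error; DBSize in cluster_commands.go aggregates
	// per-master key counts with the same helper.
	err := c.ForEachMaster(func(master *redis.Client) error {
		n, err := master.DBSize().Result()
		if err != nil {
			return err
		}
		fmt.Printf("master holds %d keys\n", n)
		return nil
	})
	if err != nil {
		fmt.Println("ForEachMaster failed:", err)
	}
}

Both calls go through the Process/defaultProcess path shown earlier, which retries MOVED/ASK redirects up to MaxRedirects times before giving up.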
+//------------------------------------------------------------------------------ + +type BoolCmd struct { + baseCmd + + val bool +} + +var _ Cmder = (*BoolCmd)(nil) + +func NewBoolCmd(args ...interface{}) *BoolCmd { + return &BoolCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *BoolCmd) Val() bool { + return cmd.val +} + +func (cmd *BoolCmd) Result() (bool, error) { + return cmd.val, cmd.err +} + +func (cmd *BoolCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *BoolCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadReply(nil) + // `SET key value NX` returns nil when key already exists. But + // `SETNX key value` returns bool (0/1). So convert nil to bool. + // TODO: is this okay? + if cmd.err == Nil { + cmd.val = false + cmd.err = nil + return nil + } + if cmd.err != nil { + return cmd.err + } + switch v := v.(type) { + case int64: + cmd.val = v == 1 + return nil + case string: + cmd.val = v == "OK" + return nil + default: + cmd.err = fmt.Errorf("got %T, wanted int64 or string", v) + return cmd.err + } +} + +//------------------------------------------------------------------------------ + +type StringCmd struct { + baseCmd + + val string +} + +var _ Cmder = (*StringCmd)(nil) + +func NewStringCmd(args ...interface{}) *StringCmd { + return &StringCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *StringCmd) Val() string { + return cmd.val +} + +func (cmd *StringCmd) Result() (string, error) { + return cmd.Val(), cmd.err +} + +func (cmd *StringCmd) Bytes() ([]byte, error) { + return []byte(cmd.val), cmd.err +} + +func (cmd *StringCmd) Int() (int, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.Atoi(cmd.Val()) +} + +func (cmd *StringCmd) Int64() (int64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseInt(cmd.Val(), 10, 64) +} + +func (cmd *StringCmd) Uint64() (uint64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseUint(cmd.Val(), 10, 64) +} + +func (cmd *StringCmd) Float64() (float64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseFloat(cmd.Val(), 64) +} + +func (cmd *StringCmd) Scan(val interface{}) error { + if cmd.err != nil { + return cmd.err + } + return proto.Scan([]byte(cmd.val), val) +} + +func (cmd *StringCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringCmd) readReply(rd *proto.Reader) error { + cmd.val, cmd.err = rd.ReadString() + return cmd.err +} + +//------------------------------------------------------------------------------ + +type FloatCmd struct { + baseCmd + + val float64 +} + +var _ Cmder = (*FloatCmd)(nil) + +func NewFloatCmd(args ...interface{}) *FloatCmd { + return &FloatCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *FloatCmd) Val() float64 { + return cmd.val +} + +func (cmd *FloatCmd) Result() (float64, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *FloatCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FloatCmd) readReply(rd *proto.Reader) error { + cmd.val, cmd.err = rd.ReadFloatReply() + return cmd.err +} + +//------------------------------------------------------------------------------ + +type StringSliceCmd struct { + baseCmd + + val []string +} + +var _ Cmder = (*StringSliceCmd)(nil) + +func NewStringSliceCmd(args ...interface{}) *StringSliceCmd { + return &StringSliceCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *StringSliceCmd) Val() []string { + return cmd.val +} + +func (cmd 
*StringSliceCmd) Result() ([]string, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *StringSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringSliceCmd) ScanSlice(container interface{}) error { + return proto.ScanSlice(cmd.Val(), container) +} + +func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(stringSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]string) + return nil +} + +// Implements proto.MultiBulkParse +func stringSliceParser(rd *proto.Reader, n int64) (interface{}, error) { + ss := make([]string, 0, n) + for i := int64(0); i < n; i++ { + s, err := rd.ReadString() + if err == Nil { + ss = append(ss, "") + } else if err != nil { + return nil, err + } else { + ss = append(ss, s) + } + } + return ss, nil +} + +//------------------------------------------------------------------------------ + +type BoolSliceCmd struct { + baseCmd + + val []bool +} + +var _ Cmder = (*BoolSliceCmd)(nil) + +func NewBoolSliceCmd(args ...interface{}) *BoolSliceCmd { + return &BoolSliceCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *BoolSliceCmd) Val() []bool { + return cmd.val +} + +func (cmd *BoolSliceCmd) Result() ([]bool, error) { + return cmd.val, cmd.err +} + +func (cmd *BoolSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(boolSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]bool) + return nil +} + +// Implements proto.MultiBulkParse +func boolSliceParser(rd *proto.Reader, n int64) (interface{}, error) { + bools := make([]bool, 0, n) + for i := int64(0); i < n; i++ { + n, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + bools = append(bools, n == 1) + } + return bools, nil +} + +//------------------------------------------------------------------------------ + +type StringStringMapCmd struct { + baseCmd + + val map[string]string +} + +var _ Cmder = (*StringStringMapCmd)(nil) + +func NewStringStringMapCmd(args ...interface{}) *StringStringMapCmd { + return &StringStringMapCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *StringStringMapCmd) Val() map[string]string { + return cmd.val +} + +func (cmd *StringStringMapCmd) Result() (map[string]string, error) { + return cmd.val, cmd.err +} + +func (cmd *StringStringMapCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringStringMapCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(stringStringMapParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.(map[string]string) + return nil +} + +// Implements proto.MultiBulkParse +func stringStringMapParser(rd *proto.Reader, n int64) (interface{}, error) { + m := make(map[string]string, n/2) + for i := int64(0); i < n; i += 2 { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + + value, err := rd.ReadString() + if err != nil { + return nil, err + } + + m[key] = value + } + return m, nil +} + +//------------------------------------------------------------------------------ + +type StringIntMapCmd struct { + baseCmd + + val map[string]int64 +} + +var _ Cmder = (*StringIntMapCmd)(nil) + +func NewStringIntMapCmd(args ...interface{}) *StringIntMapCmd { + return &StringIntMapCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *StringIntMapCmd) Val() map[string]int64 { + return cmd.val +} + +func (cmd 
*StringIntMapCmd) Result() (map[string]int64, error) { + return cmd.val, cmd.err +} + +func (cmd *StringIntMapCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringIntMapCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(stringIntMapParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.(map[string]int64) + return nil +} + +// Implements proto.MultiBulkParse +func stringIntMapParser(rd *proto.Reader, n int64) (interface{}, error) { + m := make(map[string]int64, n/2) + for i := int64(0); i < n; i += 2 { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + + n, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + + m[key] = n + } + return m, nil +} + +//------------------------------------------------------------------------------ + +type StringStructMapCmd struct { + baseCmd + + val map[string]struct{} +} + +var _ Cmder = (*StringStructMapCmd)(nil) + +func NewStringStructMapCmd(args ...interface{}) *StringStructMapCmd { + return &StringStructMapCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *StringStructMapCmd) Val() map[string]struct{} { + return cmd.val +} + +func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) { + return cmd.val, cmd.err +} + +func (cmd *StringStructMapCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(stringStructMapParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.(map[string]struct{}) + return nil +} + +// Implements proto.MultiBulkParse +func stringStructMapParser(rd *proto.Reader, n int64) (interface{}, error) { + m := make(map[string]struct{}, n) + for i := int64(0); i < n; i++ { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + + m[key] = struct{}{} + } + return m, nil +} + +//------------------------------------------------------------------------------ + +type XMessage struct { + ID string + Values map[string]interface{} +} + +type XMessageSliceCmd struct { + baseCmd + + val []XMessage +} + +var _ Cmder = (*XMessageSliceCmd)(nil) + +func NewXMessageSliceCmd(args ...interface{}) *XMessageSliceCmd { + return &XMessageSliceCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *XMessageSliceCmd) Val() []XMessage { + return cmd.val +} + +func (cmd *XMessageSliceCmd) Result() ([]XMessage, error) { + return cmd.val, cmd.err +} + +func (cmd *XMessageSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(xMessageSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]XMessage) + return nil +} + +// Implements proto.MultiBulkParse +func xMessageSliceParser(rd *proto.Reader, n int64) (interface{}, error) { + msgs := make([]XMessage, 0, n) + for i := int64(0); i < n; i++ { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + id, err := rd.ReadString() + if err != nil { + return nil, err + } + + v, err := rd.ReadArrayReply(stringInterfaceMapParser) + if err != nil { + return nil, err + } + + msgs = append(msgs, XMessage{ + ID: id, + Values: v.(map[string]interface{}), + }) + return nil, nil + }) + if err != nil { + return nil, err + } + } + return msgs, nil +} + +// Implements proto.MultiBulkParse +func stringInterfaceMapParser(rd *proto.Reader, n int64) (interface{}, error) { + m := 
make(map[string]interface{}, n/2) + for i := int64(0); i < n; i += 2 { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + + value, err := rd.ReadString() + if err != nil { + return nil, err + } + + m[key] = value + } + return m, nil +} + +//------------------------------------------------------------------------------ + +type XStream struct { + Stream string + Messages []XMessage +} + +type XStreamSliceCmd struct { + baseCmd + + val []XStream +} + +var _ Cmder = (*XStreamSliceCmd)(nil) + +func NewXStreamSliceCmd(args ...interface{}) *XStreamSliceCmd { + return &XStreamSliceCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *XStreamSliceCmd) Val() []XStream { + return cmd.val +} + +func (cmd *XStreamSliceCmd) Result() ([]XStream, error) { + return cmd.val, cmd.err +} + +func (cmd *XStreamSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(xStreamSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]XStream) + return nil +} + +// Implements proto.MultiBulkParse +func xStreamSliceParser(rd *proto.Reader, n int64) (interface{}, error) { + ret := make([]XStream, 0, n) + for i := int64(0); i < n; i++ { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 2 { + return nil, fmt.Errorf("got %d, wanted 2", n) + } + + stream, err := rd.ReadString() + if err != nil { + return nil, err + } + + v, err := rd.ReadArrayReply(xMessageSliceParser) + if err != nil { + return nil, err + } + + ret = append(ret, XStream{ + Stream: stream, + Messages: v.([]XMessage), + }) + return nil, nil + }) + if err != nil { + return nil, err + } + } + return ret, nil +} + +//------------------------------------------------------------------------------ + +type XPending struct { + Count int64 + Lower string + Higher string + Consumers map[string]int64 +} + +type XPendingCmd struct { + baseCmd + val *XPending +} + +var _ Cmder = (*XPendingCmd)(nil) + +func NewXPendingCmd(args ...interface{}) *XPendingCmd { + return &XPendingCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *XPendingCmd) Val() *XPending { + return cmd.val +} + +func (cmd *XPendingCmd) Result() (*XPending, error) { + return cmd.val, cmd.err +} + +func (cmd *XPendingCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XPendingCmd) readReply(rd *proto.Reader) error { + var info interface{} + info, cmd.err = rd.ReadArrayReply(xPendingParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = info.(*XPending) + return nil +} + +func xPendingParser(rd *proto.Reader, n int64) (interface{}, error) { + if n != 4 { + return nil, fmt.Errorf("got %d, wanted 4", n) + } + + count, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + + lower, err := rd.ReadString() + if err != nil && err != Nil { + return nil, err + } + + higher, err := rd.ReadString() + if err != nil && err != Nil { + return nil, err + } + + pending := &XPending{ + Count: count, + Lower: lower, + Higher: higher, + } + _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + for i := int64(0); i < n; i++ { + _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 2 { + return nil, fmt.Errorf("got %d, wanted 2", n) + } + + consumerName, err := rd.ReadString() + if err != nil { + return nil, err + } + + consumerPending, err := rd.ReadInt() + if err != nil { + return nil, err + } + + if 
pending.Consumers == nil { + pending.Consumers = make(map[string]int64) + } + pending.Consumers[consumerName] = consumerPending + + return nil, nil + }) + if err != nil { + return nil, err + } + } + return nil, nil + }) + if err != nil && err != Nil { + return nil, err + } + + return pending, nil +} + +//------------------------------------------------------------------------------ + +type XPendingExt struct { + Id string + Consumer string + Idle time.Duration + RetryCount int64 +} + +type XPendingExtCmd struct { + baseCmd + val []XPendingExt +} + +var _ Cmder = (*XPendingExtCmd)(nil) + +func NewXPendingExtCmd(args ...interface{}) *XPendingExtCmd { + return &XPendingExtCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *XPendingExtCmd) Val() []XPendingExt { + return cmd.val +} + +func (cmd *XPendingExtCmd) Result() ([]XPendingExt, error) { + return cmd.val, cmd.err +} + +func (cmd *XPendingExtCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error { + var info interface{} + info, cmd.err = rd.ReadArrayReply(xPendingExtSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = info.([]XPendingExt) + return nil +} + +func xPendingExtSliceParser(rd *proto.Reader, n int64) (interface{}, error) { + ret := make([]XPendingExt, 0, n) + for i := int64(0); i < n; i++ { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 4 { + return nil, fmt.Errorf("got %d, wanted 4", n) + } + + id, err := rd.ReadString() + if err != nil { + return nil, err + } + + consumer, err := rd.ReadString() + if err != nil && err != Nil { + return nil, err + } + + idle, err := rd.ReadIntReply() + if err != nil && err != Nil { + return nil, err + } + + retryCount, err := rd.ReadIntReply() + if err != nil && err != Nil { + return nil, err + } + + ret = append(ret, XPendingExt{ + Id: id, + Consumer: consumer, + Idle: time.Duration(idle) * time.Millisecond, + RetryCount: retryCount, + }) + return nil, nil + }) + if err != nil { + return nil, err + } + } + return ret, nil +} + +//------------------------------------------------------------------------------ + +//------------------------------------------------------------------------------ + +type ZSliceCmd struct { + baseCmd + + val []Z +} + +var _ Cmder = (*ZSliceCmd)(nil) + +func NewZSliceCmd(args ...interface{}) *ZSliceCmd { + return &ZSliceCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *ZSliceCmd) Val() []Z { + return cmd.val +} + +func (cmd *ZSliceCmd) Result() ([]Z, error) { + return cmd.val, cmd.err +} + +func (cmd *ZSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(zSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]Z) + return nil +} + +// Implements proto.MultiBulkParse +func zSliceParser(rd *proto.Reader, n int64) (interface{}, error) { + zz := make([]Z, n/2) + for i := int64(0); i < n; i += 2 { + var err error + + z := &zz[i/2] + + z.Member, err = rd.ReadString() + if err != nil { + return nil, err + } + + z.Score, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + } + return zz, nil +} + +//------------------------------------------------------------------------------ + +type ScanCmd struct { + baseCmd + + page []string + cursor uint64 + + process func(cmd Cmder) error +} + +var _ Cmder = (*ScanCmd)(nil) + +func NewScanCmd(process func(cmd Cmder) error, args 
...interface{}) *ScanCmd { + return &ScanCmd{ + baseCmd: baseCmd{_args: args}, + process: process, + } +} + +func (cmd *ScanCmd) Val() (keys []string, cursor uint64) { + return cmd.page, cmd.cursor +} + +func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) { + return cmd.page, cmd.cursor, cmd.err +} + +func (cmd *ScanCmd) String() string { + return cmdString(cmd, cmd.page) +} + +func (cmd *ScanCmd) readReply(rd *proto.Reader) error { + cmd.page, cmd.cursor, cmd.err = rd.ReadScanReply() + return cmd.err +} + +// Iterator creates a new ScanIterator. +func (cmd *ScanCmd) Iterator() *ScanIterator { + return &ScanIterator{ + cmd: cmd, + } +} + +//------------------------------------------------------------------------------ + +type ClusterNode struct { + Id string + Addr string +} + +type ClusterSlot struct { + Start int + End int + Nodes []ClusterNode +} + +type ClusterSlotsCmd struct { + baseCmd + + val []ClusterSlot +} + +var _ Cmder = (*ClusterSlotsCmd)(nil) + +func NewClusterSlotsCmd(args ...interface{}) *ClusterSlotsCmd { + return &ClusterSlotsCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *ClusterSlotsCmd) Val() []ClusterSlot { + return cmd.val +} + +func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *ClusterSlotsCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(clusterSlotsParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.([]ClusterSlot) + return nil +} + +// Implements proto.MultiBulkParse +func clusterSlotsParser(rd *proto.Reader, n int64) (interface{}, error) { + slots := make([]ClusterSlot, n) + for i := 0; i < len(slots); i++ { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if n < 2 { + err := fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n) + return nil, err + } + + start, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + + end, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + + nodes := make([]ClusterNode, n-2) + for j := 0; j < len(nodes); j++ { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if n != 2 && n != 3 { + err := fmt.Errorf("got %d elements in cluster info address, expected 2 or 3", n) + return nil, err + } + + ip, err := rd.ReadString() + if err != nil { + return nil, err + } + + port, err := rd.ReadString() + if err != nil { + return nil, err + } + + nodes[j].Addr = net.JoinHostPort(ip, port) + + if n == 3 { + id, err := rd.ReadString() + if err != nil { + return nil, err + } + nodes[j].Id = id + } + } + + slots[i] = ClusterSlot{ + Start: int(start), + End: int(end), + Nodes: nodes, + } + } + return slots, nil +} + +//------------------------------------------------------------------------------ + +// GeoLocation is used with GeoAdd to add geospatial location. +type GeoLocation struct { + Name string + Longitude, Latitude, Dist float64 + GeoHash int64 +} + +// GeoRadiusQuery is used with GeoRadius to query geospatial index. +type GeoRadiusQuery struct { + Radius float64 + // Can be m, km, ft, or mi. Default is km. + Unit string + WithCoord bool + WithDist bool + WithGeoHash bool + Count int + // Can be ASC or DESC. Default is no sort order. 
+ Sort string + Store string + StoreDist string +} + +type GeoLocationCmd struct { + baseCmd + + q *GeoRadiusQuery + locations []GeoLocation +} + +var _ Cmder = (*GeoLocationCmd)(nil) + +func NewGeoLocationCmd(q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd { + args = append(args, q.Radius) + if q.Unit != "" { + args = append(args, q.Unit) + } else { + args = append(args, "km") + } + if q.WithCoord { + args = append(args, "withcoord") + } + if q.WithDist { + args = append(args, "withdist") + } + if q.WithGeoHash { + args = append(args, "withhash") + } + if q.Count > 0 { + args = append(args, "count", q.Count) + } + if q.Sort != "" { + args = append(args, q.Sort) + } + if q.Store != "" { + args = append(args, "store") + args = append(args, q.Store) + } + if q.StoreDist != "" { + args = append(args, "storedist") + args = append(args, q.StoreDist) + } + return &GeoLocationCmd{ + baseCmd: baseCmd{_args: args}, + q: q, + } +} + +func (cmd *GeoLocationCmd) Val() []GeoLocation { + return cmd.locations +} + +func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) { + return cmd.locations, cmd.err +} + +func (cmd *GeoLocationCmd) String() string { + return cmdString(cmd, cmd.locations) +} + +func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(newGeoLocationSliceParser(cmd.q)) + if cmd.err != nil { + return cmd.err + } + cmd.locations = v.([]GeoLocation) + return nil +} + +func newGeoLocationParser(q *GeoRadiusQuery) proto.MultiBulkParse { + return func(rd *proto.Reader, n int64) (interface{}, error) { + var loc GeoLocation + var err error + + loc.Name, err = rd.ReadString() + if err != nil { + return nil, err + } + if q.WithDist { + loc.Dist, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + } + if q.WithGeoHash { + loc.GeoHash, err = rd.ReadIntReply() + if err != nil { + return nil, err + } + } + if q.WithCoord { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if n != 2 { + return nil, fmt.Errorf("got %d coordinates, expected 2", n) + } + + loc.Longitude, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + loc.Latitude, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + } + + return &loc, nil + } +} + +func newGeoLocationSliceParser(q *GeoRadiusQuery) proto.MultiBulkParse { + return func(rd *proto.Reader, n int64) (interface{}, error) { + locs := make([]GeoLocation, 0, n) + for i := int64(0); i < n; i++ { + v, err := rd.ReadReply(newGeoLocationParser(q)) + if err != nil { + return nil, err + } + switch vv := v.(type) { + case string: + locs = append(locs, GeoLocation{ + Name: vv, + }) + case *GeoLocation: + locs = append(locs, *vv) + default: + return nil, fmt.Errorf("got %T, expected string or *GeoLocation", v) + } + } + return locs, nil + } +} + +//------------------------------------------------------------------------------ + +type GeoPos struct { + Longitude, Latitude float64 +} + +type GeoPosCmd struct { + baseCmd + + positions []*GeoPos +} + +var _ Cmder = (*GeoPosCmd)(nil) + +func NewGeoPosCmd(args ...interface{}) *GeoPosCmd { + return &GeoPosCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *GeoPosCmd) Val() []*GeoPos { + return cmd.positions +} + +func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *GeoPosCmd) String() string { + return cmdString(cmd, cmd.positions) +} + +func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = 
rd.ReadArrayReply(geoPosSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.positions = v.([]*GeoPos) + return nil +} + +func geoPosSliceParser(rd *proto.Reader, n int64) (interface{}, error) { + positions := make([]*GeoPos, 0, n) + for i := int64(0); i < n; i++ { + v, err := rd.ReadReply(geoPosParser) + if err != nil { + if err == Nil { + positions = append(positions, nil) + continue + } + return nil, err + } + switch v := v.(type) { + case *GeoPos: + positions = append(positions, v) + default: + return nil, fmt.Errorf("got %T, expected *GeoPos", v) + } + } + return positions, nil +} + +func geoPosParser(rd *proto.Reader, n int64) (interface{}, error) { + var pos GeoPos + var err error + + pos.Longitude, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + + pos.Latitude, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + + return &pos, nil +} + +//------------------------------------------------------------------------------ + +type CommandInfo struct { + Name string + Arity int8 + Flags []string + FirstKeyPos int8 + LastKeyPos int8 + StepCount int8 + ReadOnly bool +} + +type CommandsInfoCmd struct { + baseCmd + + val map[string]*CommandInfo +} + +var _ Cmder = (*CommandsInfoCmd)(nil) + +func NewCommandsInfoCmd(args ...interface{}) *CommandsInfoCmd { + return &CommandsInfoCmd{ + baseCmd: baseCmd{_args: args}, + } +} + +func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo { + return cmd.val +} + +func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *CommandsInfoCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error { + var v interface{} + v, cmd.err = rd.ReadArrayReply(commandInfoSliceParser) + if cmd.err != nil { + return cmd.err + } + cmd.val = v.(map[string]*CommandInfo) + return nil +} + +// Implements proto.MultiBulkParse +func commandInfoSliceParser(rd *proto.Reader, n int64) (interface{}, error) { + m := make(map[string]*CommandInfo, n) + for i := int64(0); i < n; i++ { + v, err := rd.ReadReply(commandInfoParser) + if err != nil { + return nil, err + } + vv := v.(*CommandInfo) + m[vv.Name] = vv + + } + return m, nil +} + +func commandInfoParser(rd *proto.Reader, n int64) (interface{}, error) { + var cmd CommandInfo + var err error + + if n != 6 { + return nil, fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 6", n) + } + + cmd.Name, err = rd.ReadString() + if err != nil { + return nil, err + } + + arity, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.Arity = int8(arity) + + flags, err := rd.ReadReply(stringSliceParser) + if err != nil { + return nil, err + } + cmd.Flags = flags.([]string) + + firstKeyPos, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.FirstKeyPos = int8(firstKeyPos) + + lastKeyPos, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.LastKeyPos = int8(lastKeyPos) + + stepCount, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.StepCount = int8(stepCount) + + for _, flag := range cmd.Flags { + if flag == "readonly" { + cmd.ReadOnly = true + break + } + } + + return &cmd, nil +} + +//------------------------------------------------------------------------------ + +type cmdsInfoCache struct { + fn func() (map[string]*CommandInfo, error) + + once internal.Once + cmds map[string]*CommandInfo +} + +func newCmdsInfoCache(fn func() (map[string]*CommandInfo, error)) *cmdsInfoCache { + return 
&cmdsInfoCache{ + fn: fn, + } +} + +func (c *cmdsInfoCache) Get() (map[string]*CommandInfo, error) { + err := c.once.Do(func() error { + cmds, err := c.fn() + if err != nil { + return err + } + c.cmds = cmds + return nil + }) + return c.cmds, err +} diff --git a/src/vendor/github.com/go-redis/redis/commands.go b/src/vendor/github.com/go-redis/redis/commands.go new file mode 100644 index 000000000..e9a8992f6 --- /dev/null +++ b/src/vendor/github.com/go-redis/redis/commands.go @@ -0,0 +1,2498 @@ +package redis + +import ( + "errors" + "io" + "time" + + "github.com/go-redis/redis/internal" +) + +func usePrecise(dur time.Duration) bool { + return dur < time.Second || dur%time.Second != 0 +} + +func formatMs(dur time.Duration) int64 { + if dur > 0 && dur < time.Millisecond { + internal.Logf( + "specified duration is %s, but minimal supported value is %s", + dur, time.Millisecond, + ) + } + return int64(dur / time.Millisecond) +} + +func formatSec(dur time.Duration) int64 { + if dur > 0 && dur < time.Second { + internal.Logf( + "specified duration is %s, but minimal supported value is %s", + dur, time.Second, + ) + } + return int64(dur / time.Second) +} + +func appendArgs(dst, src []interface{}) []interface{} { + if len(src) == 1 { + if ss, ok := src[0].([]string); ok { + for _, s := range ss { + dst = append(dst, s) + } + return dst + } + } + + for _, v := range src { + dst = append(dst, v) + } + return dst +} + +type Cmdable interface { + Pipeline() Pipeliner + Pipelined(fn func(Pipeliner) error) ([]Cmder, error) + + TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) + TxPipeline() Pipeliner + + Command() *CommandsInfoCmd + ClientGetName() *StringCmd + Echo(message interface{}) *StringCmd + Ping() *StatusCmd + Quit() *StatusCmd + Del(keys ...string) *IntCmd + Unlink(keys ...string) *IntCmd + Dump(key string) *StringCmd + Exists(keys ...string) *IntCmd + Expire(key string, expiration time.Duration) *BoolCmd + ExpireAt(key string, tm time.Time) *BoolCmd + Keys(pattern string) *StringSliceCmd + Migrate(host, port, key string, db int64, timeout time.Duration) *StatusCmd + Move(key string, db int64) *BoolCmd + ObjectRefCount(key string) *IntCmd + ObjectEncoding(key string) *StringCmd + ObjectIdleTime(key string) *DurationCmd + Persist(key string) *BoolCmd + PExpire(key string, expiration time.Duration) *BoolCmd + PExpireAt(key string, tm time.Time) *BoolCmd + PTTL(key string) *DurationCmd + RandomKey() *StringCmd + Rename(key, newkey string) *StatusCmd + RenameNX(key, newkey string) *BoolCmd + Restore(key string, ttl time.Duration, value string) *StatusCmd + RestoreReplace(key string, ttl time.Duration, value string) *StatusCmd + Sort(key string, sort *Sort) *StringSliceCmd + SortStore(key, store string, sort *Sort) *IntCmd + SortInterfaces(key string, sort *Sort) *SliceCmd + Touch(keys ...string) *IntCmd + TTL(key string) *DurationCmd + Type(key string) *StatusCmd + Scan(cursor uint64, match string, count int64) *ScanCmd + SScan(key string, cursor uint64, match string, count int64) *ScanCmd + HScan(key string, cursor uint64, match string, count int64) *ScanCmd + ZScan(key string, cursor uint64, match string, count int64) *ScanCmd + Append(key, value string) *IntCmd + BitCount(key string, bitCount *BitCount) *IntCmd + BitOpAnd(destKey string, keys ...string) *IntCmd + BitOpOr(destKey string, keys ...string) *IntCmd + BitOpXor(destKey string, keys ...string) *IntCmd + BitOpNot(destKey string, key string) *IntCmd + BitPos(key string, bit int64, pos ...int64) *IntCmd + Decr(key string) *IntCmd + 
DecrBy(key string, decrement int64) *IntCmd + Get(key string) *StringCmd + GetBit(key string, offset int64) *IntCmd + GetRange(key string, start, end int64) *StringCmd + GetSet(key string, value interface{}) *StringCmd + Incr(key string) *IntCmd + IncrBy(key string, value int64) *IntCmd + IncrByFloat(key string, value float64) *FloatCmd + MGet(keys ...string) *SliceCmd + MSet(pairs ...interface{}) *StatusCmd + MSetNX(pairs ...interface{}) *BoolCmd + Set(key string, value interface{}, expiration time.Duration) *StatusCmd + SetBit(key string, offset int64, value int) *IntCmd + SetNX(key string, value interface{}, expiration time.Duration) *BoolCmd + SetXX(key string, value interface{}, expiration time.Duration) *BoolCmd + SetRange(key string, offset int64, value string) *IntCmd + StrLen(key string) *IntCmd + HDel(key string, fields ...string) *IntCmd + HExists(key, field string) *BoolCmd + HGet(key, field string) *StringCmd + HGetAll(key string) *StringStringMapCmd + HIncrBy(key, field string, incr int64) *IntCmd + HIncrByFloat(key, field string, incr float64) *FloatCmd + HKeys(key string) *StringSliceCmd + HLen(key string) *IntCmd + HMGet(key string, fields ...string) *SliceCmd + HMSet(key string, fields map[string]interface{}) *StatusCmd + HSet(key, field string, value interface{}) *BoolCmd + HSetNX(key, field string, value interface{}) *BoolCmd + HVals(key string) *StringSliceCmd + BLPop(timeout time.Duration, keys ...string) *StringSliceCmd + BRPop(timeout time.Duration, keys ...string) *StringSliceCmd + BRPopLPush(source, destination string, timeout time.Duration) *StringCmd + LIndex(key string, index int64) *StringCmd + LInsert(key, op string, pivot, value interface{}) *IntCmd + LInsertBefore(key string, pivot, value interface{}) *IntCmd + LInsertAfter(key string, pivot, value interface{}) *IntCmd + LLen(key string) *IntCmd + LPop(key string) *StringCmd + LPush(key string, values ...interface{}) *IntCmd + LPushX(key string, value interface{}) *IntCmd + LRange(key string, start, stop int64) *StringSliceCmd + LRem(key string, count int64, value interface{}) *IntCmd + LSet(key string, index int64, value interface{}) *StatusCmd + LTrim(key string, start, stop int64) *StatusCmd + RPop(key string) *StringCmd + RPopLPush(source, destination string) *StringCmd + RPush(key string, values ...interface{}) *IntCmd + RPushX(key string, value interface{}) *IntCmd + SAdd(key string, members ...interface{}) *IntCmd + SCard(key string) *IntCmd + SDiff(keys ...string) *StringSliceCmd + SDiffStore(destination string, keys ...string) *IntCmd + SInter(keys ...string) *StringSliceCmd + SInterStore(destination string, keys ...string) *IntCmd + SIsMember(key string, member interface{}) *BoolCmd + SMembers(key string) *StringSliceCmd + SMembersMap(key string) *StringStructMapCmd + SMove(source, destination string, member interface{}) *BoolCmd + SPop(key string) *StringCmd + SPopN(key string, count int64) *StringSliceCmd + SRandMember(key string) *StringCmd + SRandMemberN(key string, count int64) *StringSliceCmd + SRem(key string, members ...interface{}) *IntCmd + SUnion(keys ...string) *StringSliceCmd + SUnionStore(destination string, keys ...string) *IntCmd + XAdd(a *XAddArgs) *StringCmd + XLen(stream string) *IntCmd + XRange(stream, start, stop string) *XMessageSliceCmd + XRangeN(stream, start, stop string, count int64) *XMessageSliceCmd + XRevRange(stream string, start, stop string) *XMessageSliceCmd + XRevRangeN(stream string, start, stop string, count int64) *XMessageSliceCmd + XRead(a *XReadArgs) 
*XStreamSliceCmd + XReadStreams(streams ...string) *XStreamSliceCmd + XGroupCreate(stream, group, start string) *StatusCmd + XGroupSetID(stream, group, start string) *StatusCmd + XGroupDestroy(stream, group string) *IntCmd + XGroupDelConsumer(stream, group, consumer string) *IntCmd + XReadGroup(a *XReadGroupArgs) *XStreamSliceCmd + XAck(stream, group string, ids ...string) *IntCmd + XPending(stream, group string) *XPendingCmd + XPendingExt(a *XPendingExtArgs) *XPendingExtCmd + XClaim(a *XClaimArgs) *XMessageSliceCmd + XClaimJustID(a *XClaimArgs) *StringSliceCmd + XTrim(key string, maxLen int64) *IntCmd + XTrimApprox(key string, maxLen int64) *IntCmd + ZAdd(key string, members ...Z) *IntCmd + ZAddNX(key string, members ...Z) *IntCmd + ZAddXX(key string, members ...Z) *IntCmd + ZAddCh(key string, members ...Z) *IntCmd + ZAddNXCh(key string, members ...Z) *IntCmd + ZAddXXCh(key string, members ...Z) *IntCmd + ZIncr(key string, member Z) *FloatCmd + ZIncrNX(key string, member Z) *FloatCmd + ZIncrXX(key string, member Z) *FloatCmd + ZCard(key string) *IntCmd + ZCount(key, min, max string) *IntCmd + ZLexCount(key, min, max string) *IntCmd + ZIncrBy(key string, increment float64, member string) *FloatCmd + ZInterStore(destination string, store ZStore, keys ...string) *IntCmd + ZPopMax(key string, count ...int64) *ZSliceCmd + ZPopMin(key string, count ...int64) *ZSliceCmd + ZRange(key string, start, stop int64) *StringSliceCmd + ZRangeWithScores(key string, start, stop int64) *ZSliceCmd + ZRangeByScore(key string, opt ZRangeBy) *StringSliceCmd + ZRangeByLex(key string, opt ZRangeBy) *StringSliceCmd + ZRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd + ZRank(key, member string) *IntCmd + ZRem(key string, members ...interface{}) *IntCmd + ZRemRangeByRank(key string, start, stop int64) *IntCmd + ZRemRangeByScore(key, min, max string) *IntCmd + ZRemRangeByLex(key, min, max string) *IntCmd + ZRevRange(key string, start, stop int64) *StringSliceCmd + ZRevRangeWithScores(key string, start, stop int64) *ZSliceCmd + ZRevRangeByScore(key string, opt ZRangeBy) *StringSliceCmd + ZRevRangeByLex(key string, opt ZRangeBy) *StringSliceCmd + ZRevRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd + ZRevRank(key, member string) *IntCmd + ZScore(key, member string) *FloatCmd + ZUnionStore(dest string, store ZStore, keys ...string) *IntCmd + PFAdd(key string, els ...interface{}) *IntCmd + PFCount(keys ...string) *IntCmd + PFMerge(dest string, keys ...string) *StatusCmd + BgRewriteAOF() *StatusCmd + BgSave() *StatusCmd + ClientKill(ipPort string) *StatusCmd + ClientKillByFilter(keys ...string) *IntCmd + ClientList() *StringCmd + ClientPause(dur time.Duration) *BoolCmd + ConfigGet(parameter string) *SliceCmd + ConfigResetStat() *StatusCmd + ConfigSet(parameter, value string) *StatusCmd + ConfigRewrite() *StatusCmd + DBSize() *IntCmd + FlushAll() *StatusCmd + FlushAllAsync() *StatusCmd + FlushDB() *StatusCmd + FlushDBAsync() *StatusCmd + Info(section ...string) *StringCmd + LastSave() *IntCmd + Save() *StatusCmd + Shutdown() *StatusCmd + ShutdownSave() *StatusCmd + ShutdownNoSave() *StatusCmd + SlaveOf(host, port string) *StatusCmd + Time() *TimeCmd + Eval(script string, keys []string, args ...interface{}) *Cmd + EvalSha(sha1 string, keys []string, args ...interface{}) *Cmd + ScriptExists(hashes ...string) *BoolSliceCmd + ScriptFlush() *StatusCmd + ScriptKill() *StatusCmd + ScriptLoad(script string) *StringCmd + DebugObject(key string) *StringCmd + Publish(channel string, message interface{}) *IntCmd 
+ PubSubChannels(pattern string) *StringSliceCmd + PubSubNumSub(channels ...string) *StringIntMapCmd + PubSubNumPat() *IntCmd + ClusterSlots() *ClusterSlotsCmd + ClusterNodes() *StringCmd + ClusterMeet(host, port string) *StatusCmd + ClusterForget(nodeID string) *StatusCmd + ClusterReplicate(nodeID string) *StatusCmd + ClusterResetSoft() *StatusCmd + ClusterResetHard() *StatusCmd + ClusterInfo() *StringCmd + ClusterKeySlot(key string) *IntCmd + ClusterCountFailureReports(nodeID string) *IntCmd + ClusterCountKeysInSlot(slot int) *IntCmd + ClusterDelSlots(slots ...int) *StatusCmd + ClusterDelSlotsRange(min, max int) *StatusCmd + ClusterSaveConfig() *StatusCmd + ClusterSlaves(nodeID string) *StringSliceCmd + ClusterFailover() *StatusCmd + ClusterAddSlots(slots ...int) *StatusCmd + ClusterAddSlotsRange(min, max int) *StatusCmd + GeoAdd(key string, geoLocation ...*GeoLocation) *IntCmd + GeoPos(key string, members ...string) *GeoPosCmd + GeoRadius(key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd + GeoRadiusRO(key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd + GeoRadiusByMember(key, member string, query *GeoRadiusQuery) *GeoLocationCmd + GeoRadiusByMemberRO(key, member string, query *GeoRadiusQuery) *GeoLocationCmd + GeoDist(key string, member1, member2, unit string) *FloatCmd + GeoHash(key string, members ...string) *StringSliceCmd + ReadOnly() *StatusCmd + ReadWrite() *StatusCmd + MemoryUsage(key string, samples ...int) *IntCmd +} + +type StatefulCmdable interface { + Cmdable + Auth(password string) *StatusCmd + Select(index int) *StatusCmd + SwapDB(index1, index2 int) *StatusCmd + ClientSetName(name string) *BoolCmd +} + +var _ Cmdable = (*Client)(nil) +var _ Cmdable = (*Tx)(nil) +var _ Cmdable = (*Ring)(nil) +var _ Cmdable = (*ClusterClient)(nil) + +type cmdable struct { + process func(cmd Cmder) error +} + +func (c *cmdable) setProcessor(fn func(Cmder) error) { + c.process = fn +} + +type statefulCmdable struct { + cmdable + process func(cmd Cmder) error +} + +func (c *statefulCmdable) setProcessor(fn func(Cmder) error) { + c.process = fn + c.cmdable.setProcessor(fn) +} + +//------------------------------------------------------------------------------ + +func (c *statefulCmdable) Auth(password string) *StatusCmd { + cmd := NewStatusCmd("auth", password) + c.process(cmd) + return cmd +} + +func (c *cmdable) Echo(message interface{}) *StringCmd { + cmd := NewStringCmd("echo", message) + c.process(cmd) + return cmd +} + +func (c *cmdable) Ping() *StatusCmd { + cmd := NewStatusCmd("ping") + c.process(cmd) + return cmd +} + +func (c *cmdable) Wait(numSlaves int, timeout time.Duration) *IntCmd { + cmd := NewIntCmd("wait", numSlaves, int(timeout/time.Millisecond)) + c.process(cmd) + return cmd +} + +func (c *cmdable) Quit() *StatusCmd { + panic("not implemented") +} + +func (c *statefulCmdable) Select(index int) *StatusCmd { + cmd := NewStatusCmd("select", index) + c.process(cmd) + return cmd +} + +func (c *statefulCmdable) SwapDB(index1, index2 int) *StatusCmd { + cmd := NewStatusCmd("swapdb", index1, index2) + c.process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *cmdable) Command() *CommandsInfoCmd { + cmd := NewCommandsInfoCmd("command") + c.process(cmd) + return cmd +} + +func (c *cmdable) Del(keys ...string) *IntCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "del" + for i, key := range keys { + args[1+i] = key + } + cmd := NewIntCmd(args...) 
+ c.process(cmd) + return cmd +} + +func (c *cmdable) Unlink(keys ...string) *IntCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "unlink" + for i, key := range keys { + args[1+i] = key + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) Dump(key string) *StringCmd { + cmd := NewStringCmd("dump", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) Exists(keys ...string) *IntCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "exists" + for i, key := range keys { + args[1+i] = key + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) Expire(key string, expiration time.Duration) *BoolCmd { + cmd := NewBoolCmd("expire", key, formatSec(expiration)) + c.process(cmd) + return cmd +} + +func (c *cmdable) ExpireAt(key string, tm time.Time) *BoolCmd { + cmd := NewBoolCmd("expireat", key, tm.Unix()) + c.process(cmd) + return cmd +} + +func (c *cmdable) Keys(pattern string) *StringSliceCmd { + cmd := NewStringSliceCmd("keys", pattern) + c.process(cmd) + return cmd +} + +func (c *cmdable) Migrate(host, port, key string, db int64, timeout time.Duration) *StatusCmd { + cmd := NewStatusCmd( + "migrate", + host, + port, + key, + db, + formatMs(timeout), + ) + cmd.setReadTimeout(timeout) + c.process(cmd) + return cmd +} + +func (c *cmdable) Move(key string, db int64) *BoolCmd { + cmd := NewBoolCmd("move", key, db) + c.process(cmd) + return cmd +} + +func (c *cmdable) ObjectRefCount(key string) *IntCmd { + cmd := NewIntCmd("object", "refcount", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) ObjectEncoding(key string) *StringCmd { + cmd := NewStringCmd("object", "encoding", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) ObjectIdleTime(key string) *DurationCmd { + cmd := NewDurationCmd(time.Second, "object", "idletime", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) Persist(key string) *BoolCmd { + cmd := NewBoolCmd("persist", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) PExpire(key string, expiration time.Duration) *BoolCmd { + cmd := NewBoolCmd("pexpire", key, formatMs(expiration)) + c.process(cmd) + return cmd +} + +func (c *cmdable) PExpireAt(key string, tm time.Time) *BoolCmd { + cmd := NewBoolCmd( + "pexpireat", + key, + tm.UnixNano()/int64(time.Millisecond), + ) + c.process(cmd) + return cmd +} + +func (c *cmdable) PTTL(key string) *DurationCmd { + cmd := NewDurationCmd(time.Millisecond, "pttl", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) RandomKey() *StringCmd { + cmd := NewStringCmd("randomkey") + c.process(cmd) + return cmd +} + +func (c *cmdable) Rename(key, newkey string) *StatusCmd { + cmd := NewStatusCmd("rename", key, newkey) + c.process(cmd) + return cmd +} + +func (c *cmdable) RenameNX(key, newkey string) *BoolCmd { + cmd := NewBoolCmd("renamenx", key, newkey) + c.process(cmd) + return cmd +} + +func (c *cmdable) Restore(key string, ttl time.Duration, value string) *StatusCmd { + cmd := NewStatusCmd( + "restore", + key, + formatMs(ttl), + value, + ) + c.process(cmd) + return cmd +} + +func (c *cmdable) RestoreReplace(key string, ttl time.Duration, value string) *StatusCmd { + cmd := NewStatusCmd( + "restore", + key, + formatMs(ttl), + value, + "replace", + ) + c.process(cmd) + return cmd +} + +type Sort struct { + By string + Offset, Count int64 + Get []string + Order string + Alpha bool +} + +func (sort *Sort) args(key string) []interface{} { + args := []interface{}{"sort", key} + if sort.By != "" { + args = append(args, "by", sort.By) 
+ } + if sort.Offset != 0 || sort.Count != 0 { + args = append(args, "limit", sort.Offset, sort.Count) + } + for _, get := range sort.Get { + args = append(args, "get", get) + } + if sort.Order != "" { + args = append(args, sort.Order) + } + if sort.Alpha { + args = append(args, "alpha") + } + return args +} + +func (c *cmdable) Sort(key string, sort *Sort) *StringSliceCmd { + cmd := NewStringSliceCmd(sort.args(key)...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SortStore(key, store string, sort *Sort) *IntCmd { + args := sort.args(key) + if store != "" { + args = append(args, "store", store) + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SortInterfaces(key string, sort *Sort) *SliceCmd { + cmd := NewSliceCmd(sort.args(key)...) + c.process(cmd) + return cmd +} + +func (c *cmdable) Touch(keys ...string) *IntCmd { + args := make([]interface{}, len(keys)+1) + args[0] = "touch" + for i, key := range keys { + args[i+1] = key + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) TTL(key string) *DurationCmd { + cmd := NewDurationCmd(time.Second, "ttl", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) Type(key string) *StatusCmd { + cmd := NewStatusCmd("type", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) Scan(cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"scan", cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + cmd := NewScanCmd(c.process, args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SScan(key string, cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"sscan", key, cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + cmd := NewScanCmd(c.process, args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) HScan(key string, cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"hscan", key, cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + cmd := NewScanCmd(c.process, args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZScan(key string, cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"zscan", key, cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + cmd := NewScanCmd(c.process, args...) + c.process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *cmdable) Append(key, value string) *IntCmd { + cmd := NewIntCmd("append", key, value) + c.process(cmd) + return cmd +} + +type BitCount struct { + Start, End int64 +} + +func (c *cmdable) BitCount(key string, bitCount *BitCount) *IntCmd { + args := []interface{}{"bitcount", key} + if bitCount != nil { + args = append( + args, + bitCount.Start, + bitCount.End, + ) + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) bitOp(op, destKey string, keys ...string) *IntCmd { + args := make([]interface{}, 3+len(keys)) + args[0] = "bitop" + args[1] = op + args[2] = destKey + for i, key := range keys { + args[3+i] = key + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) BitOpAnd(destKey string, keys ...string) *IntCmd { + return c.bitOp("and", destKey, keys...) 
+} + +func (c *cmdable) BitOpOr(destKey string, keys ...string) *IntCmd { + return c.bitOp("or", destKey, keys...) +} + +func (c *cmdable) BitOpXor(destKey string, keys ...string) *IntCmd { + return c.bitOp("xor", destKey, keys...) +} + +func (c *cmdable) BitOpNot(destKey string, key string) *IntCmd { + return c.bitOp("not", destKey, key) +} + +func (c *cmdable) BitPos(key string, bit int64, pos ...int64) *IntCmd { + args := make([]interface{}, 3+len(pos)) + args[0] = "bitpos" + args[1] = key + args[2] = bit + switch len(pos) { + case 0: + case 1: + args[3] = pos[0] + case 2: + args[3] = pos[0] + args[4] = pos[1] + default: + panic("too many arguments") + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) Decr(key string) *IntCmd { + cmd := NewIntCmd("decr", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) DecrBy(key string, decrement int64) *IntCmd { + cmd := NewIntCmd("decrby", key, decrement) + c.process(cmd) + return cmd +} + +// Redis `GET key` command. It returns redis.Nil error when key does not exist. +func (c *cmdable) Get(key string) *StringCmd { + cmd := NewStringCmd("get", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) GetBit(key string, offset int64) *IntCmd { + cmd := NewIntCmd("getbit", key, offset) + c.process(cmd) + return cmd +} + +func (c *cmdable) GetRange(key string, start, end int64) *StringCmd { + cmd := NewStringCmd("getrange", key, start, end) + c.process(cmd) + return cmd +} + +func (c *cmdable) GetSet(key string, value interface{}) *StringCmd { + cmd := NewStringCmd("getset", key, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) Incr(key string) *IntCmd { + cmd := NewIntCmd("incr", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) IncrBy(key string, value int64) *IntCmd { + cmd := NewIntCmd("incrby", key, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) IncrByFloat(key string, value float64) *FloatCmd { + cmd := NewFloatCmd("incrbyfloat", key, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) MGet(keys ...string) *SliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "mget" + for i, key := range keys { + args[1+i] = key + } + cmd := NewSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) MSet(pairs ...interface{}) *StatusCmd { + args := make([]interface{}, 1, 1+len(pairs)) + args[0] = "mset" + args = appendArgs(args, pairs) + cmd := NewStatusCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) MSetNX(pairs ...interface{}) *BoolCmd { + args := make([]interface{}, 1, 1+len(pairs)) + args[0] = "msetnx" + args = appendArgs(args, pairs) + cmd := NewBoolCmd(args...) + c.process(cmd) + return cmd +} + +// Redis `SET key value [expiration]` command. +// +// Use expiration for `SETEX`-like behavior. +// Zero expiration means the key has no expiration time. +func (c *cmdable) Set(key string, value interface{}, expiration time.Duration) *StatusCmd { + args := make([]interface{}, 3, 4) + args[0] = "set" + args[1] = key + args[2] = value + if expiration > 0 { + if usePrecise(expiration) { + args = append(args, "px", formatMs(expiration)) + } else { + args = append(args, "ex", formatSec(expiration)) + } + } + cmd := NewStatusCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SetBit(key string, offset int64, value int) *IntCmd { + cmd := NewIntCmd( + "setbit", + key, + offset, + value, + ) + c.process(cmd) + return cmd +} + +// Redis `SET key value [expiration] NX` command. 
+// +// Zero expiration means the key has no expiration time. +func (c *cmdable) SetNX(key string, value interface{}, expiration time.Duration) *BoolCmd { + var cmd *BoolCmd + if expiration == 0 { + // Use old `SETNX` to support old Redis versions. + cmd = NewBoolCmd("setnx", key, value) + } else { + if usePrecise(expiration) { + cmd = NewBoolCmd("set", key, value, "px", formatMs(expiration), "nx") + } else { + cmd = NewBoolCmd("set", key, value, "ex", formatSec(expiration), "nx") + } + } + c.process(cmd) + return cmd +} + +// Redis `SET key value [expiration] XX` command. +// +// Zero expiration means the key has no expiration time. +func (c *cmdable) SetXX(key string, value interface{}, expiration time.Duration) *BoolCmd { + var cmd *BoolCmd + if expiration == 0 { + cmd = NewBoolCmd("set", key, value, "xx") + } else { + if usePrecise(expiration) { + cmd = NewBoolCmd("set", key, value, "px", formatMs(expiration), "xx") + } else { + cmd = NewBoolCmd("set", key, value, "ex", formatSec(expiration), "xx") + } + } + c.process(cmd) + return cmd +} + +func (c *cmdable) SetRange(key string, offset int64, value string) *IntCmd { + cmd := NewIntCmd("setrange", key, offset, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) StrLen(key string) *IntCmd { + cmd := NewIntCmd("strlen", key) + c.process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *cmdable) HDel(key string, fields ...string) *IntCmd { + args := make([]interface{}, 2+len(fields)) + args[0] = "hdel" + args[1] = key + for i, field := range fields { + args[2+i] = field + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) HExists(key, field string) *BoolCmd { + cmd := NewBoolCmd("hexists", key, field) + c.process(cmd) + return cmd +} + +func (c *cmdable) HGet(key, field string) *StringCmd { + cmd := NewStringCmd("hget", key, field) + c.process(cmd) + return cmd +} + +func (c *cmdable) HGetAll(key string) *StringStringMapCmd { + cmd := NewStringStringMapCmd("hgetall", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) HIncrBy(key, field string, incr int64) *IntCmd { + cmd := NewIntCmd("hincrby", key, field, incr) + c.process(cmd) + return cmd +} + +func (c *cmdable) HIncrByFloat(key, field string, incr float64) *FloatCmd { + cmd := NewFloatCmd("hincrbyfloat", key, field, incr) + c.process(cmd) + return cmd +} + +func (c *cmdable) HKeys(key string) *StringSliceCmd { + cmd := NewStringSliceCmd("hkeys", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) HLen(key string) *IntCmd { + cmd := NewIntCmd("hlen", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) HMGet(key string, fields ...string) *SliceCmd { + args := make([]interface{}, 2+len(fields)) + args[0] = "hmget" + args[1] = key + for i, field := range fields { + args[2+i] = field + } + cmd := NewSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) HMSet(key string, fields map[string]interface{}) *StatusCmd { + args := make([]interface{}, 2+len(fields)*2) + args[0] = "hmset" + args[1] = key + i := 2 + for k, v := range fields { + args[i] = k + args[i+1] = v + i += 2 + } + cmd := NewStatusCmd(args...) 
+ c.process(cmd) + return cmd +} + +func (c *cmdable) HSet(key, field string, value interface{}) *BoolCmd { + cmd := NewBoolCmd("hset", key, field, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) HSetNX(key, field string, value interface{}) *BoolCmd { + cmd := NewBoolCmd("hsetnx", key, field, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) HVals(key string) *StringSliceCmd { + cmd := NewStringSliceCmd("hvals", key) + c.process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *cmdable) BLPop(timeout time.Duration, keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)+1) + args[0] = "blpop" + for i, key := range keys { + args[1+i] = key + } + args[len(args)-1] = formatSec(timeout) + cmd := NewStringSliceCmd(args...) + cmd.setReadTimeout(timeout) + c.process(cmd) + return cmd +} + +func (c *cmdable) BRPop(timeout time.Duration, keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)+1) + args[0] = "brpop" + for i, key := range keys { + args[1+i] = key + } + args[len(keys)+1] = formatSec(timeout) + cmd := NewStringSliceCmd(args...) + cmd.setReadTimeout(timeout) + c.process(cmd) + return cmd +} + +func (c *cmdable) BRPopLPush(source, destination string, timeout time.Duration) *StringCmd { + cmd := NewStringCmd( + "brpoplpush", + source, + destination, + formatSec(timeout), + ) + cmd.setReadTimeout(timeout) + c.process(cmd) + return cmd +} + +func (c *cmdable) LIndex(key string, index int64) *StringCmd { + cmd := NewStringCmd("lindex", key, index) + c.process(cmd) + return cmd +} + +func (c *cmdable) LInsert(key, op string, pivot, value interface{}) *IntCmd { + cmd := NewIntCmd("linsert", key, op, pivot, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) LInsertBefore(key string, pivot, value interface{}) *IntCmd { + cmd := NewIntCmd("linsert", key, "before", pivot, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) LInsertAfter(key string, pivot, value interface{}) *IntCmd { + cmd := NewIntCmd("linsert", key, "after", pivot, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) LLen(key string) *IntCmd { + cmd := NewIntCmd("llen", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) LPop(key string) *StringCmd { + cmd := NewStringCmd("lpop", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) LPush(key string, values ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "lpush" + args[1] = key + args = appendArgs(args, values) + cmd := NewIntCmd(args...) 
+ c.process(cmd) + return cmd +} + +func (c *cmdable) LPushX(key string, value interface{}) *IntCmd { + cmd := NewIntCmd("lpushx", key, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) LRange(key string, start, stop int64) *StringSliceCmd { + cmd := NewStringSliceCmd( + "lrange", + key, + start, + stop, + ) + c.process(cmd) + return cmd +} + +func (c *cmdable) LRem(key string, count int64, value interface{}) *IntCmd { + cmd := NewIntCmd("lrem", key, count, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) LSet(key string, index int64, value interface{}) *StatusCmd { + cmd := NewStatusCmd("lset", key, index, value) + c.process(cmd) + return cmd +} + +func (c *cmdable) LTrim(key string, start, stop int64) *StatusCmd { + cmd := NewStatusCmd( + "ltrim", + key, + start, + stop, + ) + c.process(cmd) + return cmd +} + +func (c *cmdable) RPop(key string) *StringCmd { + cmd := NewStringCmd("rpop", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) RPopLPush(source, destination string) *StringCmd { + cmd := NewStringCmd("rpoplpush", source, destination) + c.process(cmd) + return cmd +} + +func (c *cmdable) RPush(key string, values ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "rpush" + args[1] = key + args = appendArgs(args, values) + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) RPushX(key string, value interface{}) *IntCmd { + cmd := NewIntCmd("rpushx", key, value) + c.process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *cmdable) SAdd(key string, members ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(members)) + args[0] = "sadd" + args[1] = key + args = appendArgs(args, members) + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SCard(key string) *IntCmd { + cmd := NewIntCmd("scard", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) SDiff(keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "sdiff" + for i, key := range keys { + args[1+i] = key + } + cmd := NewStringSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SDiffStore(destination string, keys ...string) *IntCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "sdiffstore" + args[1] = destination + for i, key := range keys { + args[2+i] = key + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SInter(keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "sinter" + for i, key := range keys { + args[1+i] = key + } + cmd := NewStringSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SInterStore(destination string, keys ...string) *IntCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "sinterstore" + args[1] = destination + for i, key := range keys { + args[2+i] = key + } + cmd := NewIntCmd(args...) 
+ c.process(cmd) + return cmd +} + +func (c *cmdable) SIsMember(key string, member interface{}) *BoolCmd { + cmd := NewBoolCmd("sismember", key, member) + c.process(cmd) + return cmd +} + +// Redis `SMEMBERS key` command output as a slice +func (c *cmdable) SMembers(key string) *StringSliceCmd { + cmd := NewStringSliceCmd("smembers", key) + c.process(cmd) + return cmd +} + +// Redis `SMEMBERS key` command output as a map +func (c *cmdable) SMembersMap(key string) *StringStructMapCmd { + cmd := NewStringStructMapCmd("smembers", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) SMove(source, destination string, member interface{}) *BoolCmd { + cmd := NewBoolCmd("smove", source, destination, member) + c.process(cmd) + return cmd +} + +// Redis `SPOP key` command. +func (c *cmdable) SPop(key string) *StringCmd { + cmd := NewStringCmd("spop", key) + c.process(cmd) + return cmd +} + +// Redis `SPOP key count` command. +func (c *cmdable) SPopN(key string, count int64) *StringSliceCmd { + cmd := NewStringSliceCmd("spop", key, count) + c.process(cmd) + return cmd +} + +// Redis `SRANDMEMBER key` command. +func (c *cmdable) SRandMember(key string) *StringCmd { + cmd := NewStringCmd("srandmember", key) + c.process(cmd) + return cmd +} + +// Redis `SRANDMEMBER key count` command. +func (c *cmdable) SRandMemberN(key string, count int64) *StringSliceCmd { + cmd := NewStringSliceCmd("srandmember", key, count) + c.process(cmd) + return cmd +} + +func (c *cmdable) SRem(key string, members ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(members)) + args[0] = "srem" + args[1] = key + args = appendArgs(args, members) + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SUnion(keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "sunion" + for i, key := range keys { + args[1+i] = key + } + cmd := NewStringSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) SUnionStore(destination string, keys ...string) *IntCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "sunionstore" + args[1] = destination + for i, key := range keys { + args[2+i] = key + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +type XAddArgs struct { + Stream string + MaxLen int64 // MAXLEN N + MaxLenApprox int64 // MAXLEN ~ N + ID string + Values map[string]interface{} +} + +func (c *cmdable) XAdd(a *XAddArgs) *StringCmd { + args := make([]interface{}, 0, 6+len(a.Values)*2) + args = append(args, "xadd") + args = append(args, a.Stream) + if a.MaxLen > 0 { + args = append(args, "maxlen", a.MaxLen) + } else if a.MaxLenApprox > 0 { + args = append(args, "maxlen", "~", a.MaxLenApprox) + } + if a.ID != "" { + args = append(args, a.ID) + } else { + args = append(args, "*") + } + for k, v := range a.Values { + args = append(args, k) + args = append(args, v) + } + + cmd := NewStringCmd(args...) 
+ c.process(cmd) + return cmd +} + +func (c *cmdable) XLen(stream string) *IntCmd { + cmd := NewIntCmd("xlen", stream) + c.process(cmd) + return cmd +} + +func (c *cmdable) XRange(stream, start, stop string) *XMessageSliceCmd { + cmd := NewXMessageSliceCmd("xrange", stream, start, stop) + c.process(cmd) + return cmd +} + +func (c *cmdable) XRangeN(stream, start, stop string, count int64) *XMessageSliceCmd { + cmd := NewXMessageSliceCmd("xrange", stream, start, stop, "count", count) + c.process(cmd) + return cmd +} + +func (c *cmdable) XRevRange(stream, start, stop string) *XMessageSliceCmd { + cmd := NewXMessageSliceCmd("xrevrange", stream, start, stop) + c.process(cmd) + return cmd +} + +func (c *cmdable) XRevRangeN(stream, start, stop string, count int64) *XMessageSliceCmd { + cmd := NewXMessageSliceCmd("xrevrange", stream, start, stop, "count", count) + c.process(cmd) + return cmd +} + +type XReadArgs struct { + Streams []string + Count int64 + Block time.Duration +} + +func (c *cmdable) XRead(a *XReadArgs) *XStreamSliceCmd { + args := make([]interface{}, 0, 5+len(a.Streams)) + args = append(args, "xread") + if a.Count > 0 { + args = append(args, "count") + args = append(args, a.Count) + } + if a.Block >= 0 { + args = append(args, "block") + args = append(args, int64(a.Block/time.Millisecond)) + } + args = append(args, "streams") + for _, s := range a.Streams { + args = append(args, s) + } + + cmd := NewXStreamSliceCmd(args...) + if a.Block >= 0 { + cmd.setReadTimeout(a.Block) + } + c.process(cmd) + return cmd +} + +func (c *cmdable) XReadStreams(streams ...string) *XStreamSliceCmd { + return c.XRead(&XReadArgs{ + Streams: streams, + Block: -1, + }) +} + +func (c *cmdable) XGroupCreate(stream, group, start string) *StatusCmd { + cmd := NewStatusCmd("xgroup", "create", stream, group, start) + c.process(cmd) + return cmd +} + +func (c *cmdable) XGroupSetID(stream, group, start string) *StatusCmd { + cmd := NewStatusCmd("xgroup", "setid", stream, group, start) + c.process(cmd) + return cmd +} + +func (c *cmdable) XGroupDestroy(stream, group string) *IntCmd { + cmd := NewIntCmd("xgroup", "destroy", stream, group) + c.process(cmd) + return cmd +} + +func (c *cmdable) XGroupDelConsumer(stream, group, consumer string) *IntCmd { + cmd := NewIntCmd("xgroup", "delconsumer", stream, group, consumer) + c.process(cmd) + return cmd +} + +type XReadGroupArgs struct { + Group string + Consumer string + Streams []string + Count int64 + Block time.Duration +} + +func (c *cmdable) XReadGroup(a *XReadGroupArgs) *XStreamSliceCmd { + args := make([]interface{}, 0, 8+len(a.Streams)) + args = append(args, "xreadgroup", "group", a.Group, a.Consumer) + if a.Count > 0 { + args = append(args, "count", a.Count) + } + if a.Block >= 0 { + args = append(args, "block", int64(a.Block/time.Millisecond)) + } + args = append(args, "streams") + for _, s := range a.Streams { + args = append(args, s) + } + + cmd := NewXStreamSliceCmd(args...) + if a.Block >= 0 { + cmd.setReadTimeout(a.Block) + } + c.process(cmd) + return cmd +} + +func (c *cmdable) XAck(stream, group string, ids ...string) *IntCmd { + args := []interface{}{"xack", stream, group} + for _, id := range ids { + args = append(args, id) + } + cmd := NewIntCmd(args...) 
+ c.process(cmd) + return cmd +} + +func (c *cmdable) XPending(stream, group string) *XPendingCmd { + cmd := NewXPendingCmd("xpending", stream, group) + c.process(cmd) + return cmd +} + +type XPendingExtArgs struct { + Stream string + Group string + Start string + End string + Count int64 + Consumer string +} + +func (c *cmdable) XPendingExt(a *XPendingExtArgs) *XPendingExtCmd { + args := make([]interface{}, 0, 7) + args = append(args, "xpending", a.Stream, a.Group, a.Start, a.End, a.Count) + if a.Consumer != "" { + args = append(args, a.Consumer) + } + cmd := NewXPendingExtCmd(args...) + c.process(cmd) + return cmd +} + +type XClaimArgs struct { + Stream string + Group string + Consumer string + MinIdle time.Duration + Messages []string +} + +func (c *cmdable) XClaim(a *XClaimArgs) *XMessageSliceCmd { + args := xClaimArgs(a) + cmd := NewXMessageSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) XClaimJustID(a *XClaimArgs) *StringSliceCmd { + args := xClaimArgs(a) + args = append(args, "justid") + cmd := NewStringSliceCmd(args...) + c.process(cmd) + return cmd +} + +func xClaimArgs(a *XClaimArgs) []interface{} { + args := make([]interface{}, 0, 4+len(a.Messages)) + args = append(args, + "xclaim", + a.Stream, + a.Group, a.Consumer, + int64(a.MinIdle/time.Millisecond)) + for _, id := range a.Messages { + args = append(args, id) + } + return args +} + +func (c *cmdable) XTrim(key string, maxLen int64) *IntCmd { + cmd := NewIntCmd("xtrim", key, "maxlen", maxLen) + c.process(cmd) + return cmd +} + +func (c *cmdable) XTrimApprox(key string, maxLen int64) *IntCmd { + cmd := NewIntCmd("xtrim", key, "maxlen", "~", maxLen) + c.process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +// Z represents sorted set member. +type Z struct { + Score float64 + Member interface{} +} + +// ZStore is used as an arg to ZInterStore and ZUnionStore. +type ZStore struct { + Weights []float64 + // Can be SUM, MIN or MAX. + Aggregate string +} + +func (c *cmdable) zAdd(a []interface{}, n int, members ...Z) *IntCmd { + for i, m := range members { + a[n+2*i] = m.Score + a[n+2*i+1] = m.Member + } + cmd := NewIntCmd(a...) + c.process(cmd) + return cmd +} + +// Redis `ZADD key score member [score member ...]` command. +func (c *cmdable) ZAdd(key string, members ...Z) *IntCmd { + const n = 2 + a := make([]interface{}, n+2*len(members)) + a[0], a[1] = "zadd", key + return c.zAdd(a, n, members...) +} + +// Redis `ZADD key NX score member [score member ...]` command. +func (c *cmdable) ZAddNX(key string, members ...Z) *IntCmd { + const n = 3 + a := make([]interface{}, n+2*len(members)) + a[0], a[1], a[2] = "zadd", key, "nx" + return c.zAdd(a, n, members...) +} + +// Redis `ZADD key XX score member [score member ...]` command. +func (c *cmdable) ZAddXX(key string, members ...Z) *IntCmd { + const n = 3 + a := make([]interface{}, n+2*len(members)) + a[0], a[1], a[2] = "zadd", key, "xx" + return c.zAdd(a, n, members...) +} + +// Redis `ZADD key CH score member [score member ...]` command. +func (c *cmdable) ZAddCh(key string, members ...Z) *IntCmd { + const n = 3 + a := make([]interface{}, n+2*len(members)) + a[0], a[1], a[2] = "zadd", key, "ch" + return c.zAdd(a, n, members...) +} + +// Redis `ZADD key NX CH score member [score member ...]` command. 
+func (c *cmdable) ZAddNXCh(key string, members ...Z) *IntCmd { + const n = 4 + a := make([]interface{}, n+2*len(members)) + a[0], a[1], a[2], a[3] = "zadd", key, "nx", "ch" + return c.zAdd(a, n, members...) +} + +// Redis `ZADD key XX CH score member [score member ...]` command. +func (c *cmdable) ZAddXXCh(key string, members ...Z) *IntCmd { + const n = 4 + a := make([]interface{}, n+2*len(members)) + a[0], a[1], a[2], a[3] = "zadd", key, "xx", "ch" + return c.zAdd(a, n, members...) +} + +func (c *cmdable) zIncr(a []interface{}, n int, members ...Z) *FloatCmd { + for i, m := range members { + a[n+2*i] = m.Score + a[n+2*i+1] = m.Member + } + cmd := NewFloatCmd(a...) + c.process(cmd) + return cmd +} + +// Redis `ZADD key INCR score member` command. +func (c *cmdable) ZIncr(key string, member Z) *FloatCmd { + const n = 3 + a := make([]interface{}, n+2) + a[0], a[1], a[2] = "zadd", key, "incr" + return c.zIncr(a, n, member) +} + +// Redis `ZADD key NX INCR score member` command. +func (c *cmdable) ZIncrNX(key string, member Z) *FloatCmd { + const n = 4 + a := make([]interface{}, n+2) + a[0], a[1], a[2], a[3] = "zadd", key, "incr", "nx" + return c.zIncr(a, n, member) +} + +// Redis `ZADD key XX INCR score member` command. +func (c *cmdable) ZIncrXX(key string, member Z) *FloatCmd { + const n = 4 + a := make([]interface{}, n+2) + a[0], a[1], a[2], a[3] = "zadd", key, "incr", "xx" + return c.zIncr(a, n, member) +} + +func (c *cmdable) ZCard(key string) *IntCmd { + cmd := NewIntCmd("zcard", key) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZCount(key, min, max string) *IntCmd { + cmd := NewIntCmd("zcount", key, min, max) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZLexCount(key, min, max string) *IntCmd { + cmd := NewIntCmd("zlexcount", key, min, max) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZIncrBy(key string, increment float64, member string) *FloatCmd { + cmd := NewFloatCmd("zincrby", key, increment, member) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZInterStore(destination string, store ZStore, keys ...string) *IntCmd { + args := make([]interface{}, 3+len(keys)) + args[0] = "zinterstore" + args[1] = destination + args[2] = len(keys) + for i, key := range keys { + args[3+i] = key + } + if len(store.Weights) > 0 { + args = append(args, "weights") + for _, weight := range store.Weights { + args = append(args, weight) + } + } + if store.Aggregate != "" { + args = append(args, "aggregate", store.Aggregate) + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZPopMax(key string, count ...int64) *ZSliceCmd { + args := []interface{}{ + "zpopmax", + key, + } + + switch len(count) { + case 0: + break + case 1: + args = append(args, count[0]) + default: + panic("too many arguments") + } + + cmd := NewZSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZPopMin(key string, count ...int64) *ZSliceCmd { + args := []interface{}{ + "zpopmin", + key, + } + + switch len(count) { + case 0: + break + case 1: + args = append(args, count[0]) + default: + panic("too many arguments") + } + + cmd := NewZSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) zRange(key string, start, stop int64, withScores bool) *StringSliceCmd { + args := []interface{}{ + "zrange", + key, + start, + stop, + } + if withScores { + args = append(args, "withscores") + } + cmd := NewStringSliceCmd(args...) 
+ c.process(cmd) + return cmd +} + +func (c *cmdable) ZRange(key string, start, stop int64) *StringSliceCmd { + return c.zRange(key, start, stop, false) +} + +func (c *cmdable) ZRangeWithScores(key string, start, stop int64) *ZSliceCmd { + cmd := NewZSliceCmd("zrange", key, start, stop, "withscores") + c.process(cmd) + return cmd +} + +type ZRangeBy struct { + Min, Max string + Offset, Count int64 +} + +func (c *cmdable) zRangeBy(zcmd, key string, opt ZRangeBy, withScores bool) *StringSliceCmd { + args := []interface{}{zcmd, key, opt.Min, opt.Max} + if withScores { + args = append(args, "withscores") + } + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "limit", + opt.Offset, + opt.Count, + ) + } + cmd := NewStringSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRangeByScore(key string, opt ZRangeBy) *StringSliceCmd { + return c.zRangeBy("zrangebyscore", key, opt, false) +} + +func (c *cmdable) ZRangeByLex(key string, opt ZRangeBy) *StringSliceCmd { + return c.zRangeBy("zrangebylex", key, opt, false) +} + +func (c *cmdable) ZRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd { + args := []interface{}{"zrangebyscore", key, opt.Min, opt.Max, "withscores"} + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "limit", + opt.Offset, + opt.Count, + ) + } + cmd := NewZSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRank(key, member string) *IntCmd { + cmd := NewIntCmd("zrank", key, member) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRem(key string, members ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(members)) + args[0] = "zrem" + args[1] = key + args = appendArgs(args, members) + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRemRangeByRank(key string, start, stop int64) *IntCmd { + cmd := NewIntCmd( + "zremrangebyrank", + key, + start, + stop, + ) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRemRangeByScore(key, min, max string) *IntCmd { + cmd := NewIntCmd("zremrangebyscore", key, min, max) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRemRangeByLex(key, min, max string) *IntCmd { + cmd := NewIntCmd("zremrangebylex", key, min, max) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRevRange(key string, start, stop int64) *StringSliceCmd { + cmd := NewStringSliceCmd("zrevrange", key, start, stop) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRevRangeWithScores(key string, start, stop int64) *ZSliceCmd { + cmd := NewZSliceCmd("zrevrange", key, start, stop, "withscores") + c.process(cmd) + return cmd +} + +func (c *cmdable) zRevRangeBy(zcmd, key string, opt ZRangeBy) *StringSliceCmd { + args := []interface{}{zcmd, key, opt.Max, opt.Min} + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "limit", + opt.Offset, + opt.Count, + ) + } + cmd := NewStringSliceCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZRevRangeByScore(key string, opt ZRangeBy) *StringSliceCmd { + return c.zRevRangeBy("zrevrangebyscore", key, opt) +} + +func (c *cmdable) ZRevRangeByLex(key string, opt ZRangeBy) *StringSliceCmd { + return c.zRevRangeBy("zrevrangebylex", key, opt) +} + +func (c *cmdable) ZRevRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd { + args := []interface{}{"zrevrangebyscore", key, opt.Max, opt.Min, "withscores"} + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "limit", + opt.Offset, + opt.Count, + ) + } + cmd := NewZSliceCmd(args...) 
+ c.process(cmd) + return cmd +} + +func (c *cmdable) ZRevRank(key, member string) *IntCmd { + cmd := NewIntCmd("zrevrank", key, member) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZScore(key, member string) *FloatCmd { + cmd := NewFloatCmd("zscore", key, member) + c.process(cmd) + return cmd +} + +func (c *cmdable) ZUnionStore(dest string, store ZStore, keys ...string) *IntCmd { + args := make([]interface{}, 3+len(keys)) + args[0] = "zunionstore" + args[1] = dest + args[2] = len(keys) + for i, key := range keys { + args[3+i] = key + } + if len(store.Weights) > 0 { + args = append(args, "weights") + for _, weight := range store.Weights { + args = append(args, weight) + } + } + if store.Aggregate != "" { + args = append(args, "aggregate", store.Aggregate) + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *cmdable) PFAdd(key string, els ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(els)) + args[0] = "pfadd" + args[1] = key + args = appendArgs(args, els) + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) PFCount(keys ...string) *IntCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "pfcount" + for i, key := range keys { + args[1+i] = key + } + cmd := NewIntCmd(args...) + c.process(cmd) + return cmd +} + +func (c *cmdable) PFMerge(dest string, keys ...string) *StatusCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "pfmerge" + args[1] = dest + for i, key := range keys { + args[2+i] = key + } + cmd := NewStatusCmd(args...) + c.process(cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c *cmdable) BgRewriteAOF() *StatusCmd { + cmd := NewStatusCmd("bgrewriteaof") + c.process(cmd) + return cmd +} + +func (c *cmdable) BgSave() *StatusCmd { + cmd := NewStatusCmd("bgsave") + c.process(cmd) + return cmd +} + +func (c *cmdable) ClientKill(ipPort string) *StatusCmd { + cmd := NewStatusCmd("client", "kill", ipPort) + c.process(cmd) + return cmd +} + +// ClientKillByFilter is new style synx, while the ClientKill is old +// CLIENT KILL