Provide option for kubernetes to watch only a specific namespace (#433)

This commit is contained in:
Geoff Bourne 2025-07-20 12:59:14 -05:00 committed by GitHub
parent 9a457138ab
commit 7a4f83a30f
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
19 changed files with 663 additions and 142 deletions

1
.gitignore vendored
View File

@ -6,3 +6,4 @@
/mc-router
/dist/
/*.private.env.json
/kustomization.yml

View File

@ -46,6 +46,8 @@ Routes Minecraft client connections to backend servers based upon the requested
Use in-cluster Kubernetes config (env IN_KUBE_CLUSTER)
-kube-config string
The path to a Kubernetes configuration file (env KUBE_CONFIG)
-kube-namespace string
The namespace to watch or blank for all, which is the default (env KUBE_NAMESPACE)
-mapping value
Comma or newline delimited or repeated mappings of externalHostname=host:port (env MAPPING)
-metrics-backend string
@ -223,6 +225,15 @@ When running `mc-router` as a Kubernetes Pod and you pass the `--in-kube-cluster
- `mc-router.itzg.me/externalServerName` : The value of the annotation will be registered as the external hostname Minecraft clients would use to connect to the routed service. The service is used as the routed backend. You can use more hostnames by splitting them with comma.
- `mc-router.itzg.me/defaultServer` : The service is used as the default if no other `externalServiceName` annotations applies.
By default, the router will watch all namespaces for those services; however, a specific namespace can be specified using the `KUBE_NAMESPACE` environment variable. The pod's own namespace could be set using:
```yaml
- name: KUBE_NAMESPACE
  valueFrom:
    fieldRef:
      fieldPath: metadata.namespace
```
For example, start `mc-router`'s container spec with
```yaml
@ -253,6 +264,26 @@ metadata:
"mc-router.itzg.me/externalServerName": "external.host.name,other.host.name"
```
The `Role` or `ClusterRole` bound to the service account should have the rules:
```yaml
rules:
- apiGroups: [""]
resources: ["services"]
verbs: ["watch","list"]
```
and if using StatefulSet auto-scaling additionally
```yaml
- apiGroups: ["apps"]
resources: ["statefulsets"]
verbs: ["watch","list","get","update"]
- apiGroups: ["apps"]
resources: ["statefulsets/scale"]
verbs: ["get","update"]
```
### Service parsing
To determine the endpoint, mc-router will pick the host from `spec.clusterIP` by default; if the service is of type `ExternalName` it will use `spec.externalName` instead.

View File

@ -0,0 +1,86 @@
# used by ../skaffold.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: mc-router
---
# Cluster-wide read access to services (route discovery) and stateful sets
# (auto scale up/down support).
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: services-watcher
rules:
  - apiGroups: [""]
    resources: ["services"]
    verbs: ["watch","list"]
  - apiGroups: ["apps"]
    resources: ["statefulsets"]
    verbs: ["watch","list","get","update"]
  - apiGroups: ["apps"]
    resources: ["statefulsets/scale"]
    verbs: ["get","update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: mc-router-services-watcher
subjects:
  - kind: ServiceAccount
    name: mc-router
    namespace: default
roleRef:
  kind: ClusterRole
  name: services-watcher
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: mc-router
  name: mc-router-deployment
spec:
  selector:
    matchLabels:
      app: mc-router
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: mc-router
    spec:
      serviceAccountName: mc-router
      containers:
        - image: itzg/mc-router
          name: mc-router
          # Add "--auto-scale-up" here for https://github.com/itzg/mc-router/#auto-scale-up
          args:
            - --api-binding
            - :8080
            - --in-kube-cluster
          ports:
            - name: proxy
              containerPort: 25565
            - name: web
              containerPort: 8080
          resources:
            requests:
              memory: 50Mi
              cpu: "100m"
            limits:
              memory: 100Mi
              cpu: "250m"
---
apiVersion: v1
kind: Service
metadata:
  name: mc-router
spec:
  selector:
    app: mc-router
  ports:
    - protocol: TCP
      port: 25565
      targetPort: proxy
  type: NodePort

View File

@ -6,7 +6,7 @@ metadata:
name: mc-router
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
kind: Role
metadata:
name: services-watcher
rules:
@ -14,11 +14,14 @@ rules:
resources: ["services"]
verbs: ["watch","list"]
- apiGroups: ["apps"]
resources: ["statefulsets", "statefulsets/scale"]
resources: ["statefulsets"]
verbs: ["watch","list","get","update"]
- apiGroups: ["apps"]
resources: ["statefulsets/scale"]
verbs: ["get","update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
kind: RoleBinding
metadata:
name: mc-router-services-watcher
subjects:
@ -26,7 +29,7 @@ subjects:
name: mc-router
namespace: default
roleRef:
kind: ClusterRole
kind: Role
name: services-watcher
apiGroup: rbac.authorization.k8s.io
---
@ -35,7 +38,7 @@ kind: Deployment
metadata:
labels:
app: mc-router
name: mc-router-deployment
name: mc-router
spec:
selector:
matchLabels:
@ -56,6 +59,11 @@ spec:
- --api-binding
- :8080
- --in-kube-cluster
env:
- name: KUBE_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
ports:
- name: proxy
containerPort: 25565

1
go.mod
View File

@ -58,6 +58,7 @@ require (
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.63.0 // indirect
github.com/prometheus/procfs v0.16.1 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/x448/float16 v0.8.4 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect

64
kustomize/README.md Normal file
View File

@ -0,0 +1,64 @@
https://kubernetes.io/docs/tasks/manage-kubernetes-objects/kustomization/
## Example
To use your own dev image, such as via [Github Packages](https://docs.github.com/en/packages/working-with-a-github-packages-registry/working-with-the-container-registry), create `kustomization.yml` and alter the overlay to choose `role` or `cluster-role`. This example assumes that a docker image pull secret has been created and named `ghcr-pull`, [see below](#creating-image-pull-secret).
```yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - https://github.com/itzg/mc-router/kustomize/overlays/role
  # OR
  # - https://github.com/itzg/mc-router/kustomize/overlays/cluster-role
images:
  - name: itzg/mc-router
    # replace your-user-org with your Github user/org and/or replace ghcr.io with your Docker registry
    newName: ghcr.io/your-user-org/mc-router-dev
patches:
  - target:
      name: mc-router
      kind: Deployment
    patch: |-
      apiVersion: apps/v1
      kind: Deployment
      metadata:
        name: _
      spec:
        template:
          spec:
            imagePullSecrets:
              - name: ghcr-pull
            containers:
              - name: mc-router
                imagePullPolicy: Always
```
### Creating image pull secret
The following is an example of [creating an image pull secret](https://kubernetes.io/docs/reference/kubectl/generated/kubectl_create/kubectl_create_secret_docker-registry/) named `ghcr-pull`. Be sure to replace `your-user-org`, and the password will be a [personal access token](https://github.com/settings/tokens) with `read:packages` scope.
```shell
kubectl create secret docker-registry ghcr-pull --docker-server=ghcr.io --docker-username=your-user-org --docker-password=ghp_...
```
### Build and push your image
Be sure to replace `your-user-org`:
```shell
docker build -t ghcr.io/your-user-org/mc-router-dev .
docker push ghcr.io/your-user-org/mc-router-dev
```
### Apply the kustomization
```shell
kubectl apply -k .
```
or if you want to preview what will be generated and applied:
```shell
kubectl kustomize
```

View File

@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- service-account.yml
- mc-router-deployment.yml
- server-examples.yaml

View File

@ -0,0 +1,56 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: mc-router
name: mc-router
spec:
selector:
matchLabels:
app: mc-router
strategy:
type: Recreate
template:
metadata:
labels:
app: mc-router
spec:
serviceAccountName: mc-router
containers:
- image: itzg/mc-router
name: mc-router
# Add "--auto-scale-up" here for https://github.com/itzg/mc-router/#auto-scale-up
args:
- --api-binding
- :8080
- --in-kube-cluster
env:
- name: KUBE_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
ports:
- name: proxy
containerPort: 25565
- name: web
containerPort: 8080
resources:
requests:
memory: 50Mi
cpu: "100m"
limits:
memory: 100Mi
cpu: "250m"
---
apiVersion: v1
kind: Service
metadata:
name: mc-router
spec:
selector:
app: mc-router
ports:
- protocol: TCP
port: 25565
targetPort: proxy
type: NodePort

View File

@ -0,0 +1,84 @@
---
apiVersion: v1
kind: Service
metadata:
name: mc-latest
annotations:
"mc-router.itzg.me/defaultServer": "true"
spec:
type: NodePort
ports:
- port: 25565
name: minecraft
selector:
app: mc-latest
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: mc-latest
name: mc-latest
spec:
selector:
matchLabels:
app: mc-latest
template:
metadata:
labels:
app: mc-latest
spec:
securityContext:
runAsUser: 1000
fsGroup: 1000
containers:
- image: itzg/minecraft-server
name: mc-latest
env:
- name: EULA
value: "TRUE"
ports:
- containerPort: 25565
---
apiVersion: v1
kind: Service
metadata:
name: mc-snapshot
annotations:
"mc-router.itzg.me/externalServerName": "snapshot.your.domain"
spec:
type: NodePort
ports:
- port: 25565
name: minecraft
selector:
app: mc-snapshot
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: mc-snapshot
name: mc-snapshot
spec:
selector:
matchLabels:
app: mc-snapshot
template:
metadata:
labels:
app: mc-snapshot
spec:
securityContext:
runAsUser: 1000
fsGroup: 1000
containers:
- image: itzg/minecraft-server
name: mc-snapshot
env:
- name: EULA
value: "TRUE"
- name: VERSION
value: "SNAPSHOT"
ports:
- containerPort: 25565

View File

@ -0,0 +1,4 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: mc-router

View File

@ -0,0 +1,28 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: services-watcher
rules:
- apiGroups: [""]
resources: ["services"]
verbs: ["watch","list"]
- apiGroups: ["apps"]
resources: ["statefulsets"]
verbs: ["watch","list","get","update"]
- apiGroups: ["apps"]
resources: ["statefulsets/scale"]
verbs: ["get","update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: mc-router-services-watcher
subjects:
- kind: ServiceAccount
name: mc-router
namespace: default
roleRef:
kind: ClusterRole
name: services-watcher
apiGroup: rbac.authorization.k8s.io

View File

@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base
- cluster-role.yml

View File

@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base
- role.yml

View File

@ -0,0 +1,28 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: services-watcher
rules:
- apiGroups: [""]
resources: ["services"]
verbs: ["watch","list"]
- apiGroups: ["apps"]
resources: ["statefulsets"]
verbs: ["watch","list","get","update"]
- apiGroups: ["apps"]
resources: ["statefulsets/scale"]
verbs: ["get","update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: mc-router-services-watcher
subjects:
- kind: ServiceAccount
name: mc-router
namespace: default
roleRef:
kind: Role
name: services-watcher
apiGroup: rbac.authorization.k8s.io

View File

@ -26,6 +26,7 @@ type Config struct {
ConnectionRateLimit int `default:"1" usage:"Max number of connections to allow per second"`
InKubeCluster bool `usage:"Use in-cluster Kubernetes config"`
KubeConfig string `usage:"The path to a Kubernetes configuration file"`
KubeNamespace string `usage:"The namespace to watch or blank for all, which is the default"`
InDocker bool `usage:"Use Docker service discovery"`
InDockerSwarm bool `usage:"Use Docker Swarm service discovery"`
DockerSocket string `default:"unix:///var/run/docker.sock" usage:"Path to Docker socket to use"`

View File

@ -25,46 +25,60 @@ const (
AnnotationDefaultServer = "mc-router.itzg.me/defaultServer"
)
type IK8sWatcher interface {
StartWithConfig(ctx context.Context, kubeConfigFile string, autoScaleUp bool, autoScaleDown bool) error
StartInCluster(ctx context.Context, autoScaleUp bool, autoScaleDown bool) error
}
var K8sWatcher IK8sWatcher = &k8sWatcherImpl{}
type k8sWatcherImpl struct {
// K8sWatcher is a RouteFinder that can find routes from kubernetes services.
// It also watches for stateful sets to auto scale up/down, if enabled.
type K8sWatcher struct {
sync.RWMutex
config *rest.Config
autoScaleUp bool
autoScaleDown bool
namespace string
// The key in mappings is a Service, and the value the StatefulSet name
mappings map[string]string
clientset *kubernetes.Clientset
mappings map[string]string
routesHandler RoutesHandler
clientset *kubernetes.Clientset
}
func (w *k8sWatcherImpl) StartInCluster(ctx context.Context, autoScaleUp bool, autoScaleDown bool) error {
func NewK8sWatcherInCluster() (*K8sWatcher, error) {
config, err := rest.InClusterConfig()
if err != nil {
return errors.Wrap(err, "Unable to load in-cluster config")
return nil, errors.Wrap(err, "Unable to load in-cluster config")
}
return w.startWithLoadedConfig(ctx, config, autoScaleUp, autoScaleDown)
return &K8sWatcher{
config: config,
}, nil
}
func (w *k8sWatcherImpl) StartWithConfig(ctx context.Context, kubeConfigFile string, autoScaleUp bool, autoScaleDown bool) error {
func NewK8sWatcherWithConfig(kubeConfigFile string) (*K8sWatcher, error) {
config, err := clientcmd.BuildConfigFromFlags("", kubeConfigFile)
if err != nil {
return errors.Wrap(err, "Could not load kube config file")
return nil, errors.Wrap(err, "Could not load kube config file")
}
return w.startWithLoadedConfig(ctx, config, autoScaleUp, autoScaleDown)
return &K8sWatcher{
config: config,
}, nil
}
func (w *k8sWatcherImpl) startWithLoadedConfig(ctx context.Context, config *rest.Config, autoScaleUp bool, autoScaleDown bool) error {
func (w *K8sWatcher) WithAutoScale(autoScaleUp bool, autoScaleDown bool) *K8sWatcher {
w.autoScaleUp = autoScaleUp
w.autoScaleDown = autoScaleDown
return w
}
clientset, err := kubernetes.NewForConfig(config)
func (w *K8sWatcher) WithNamespace(namespace string) *K8sWatcher {
w.namespace = namespace
return w
}
func (w *K8sWatcher) String() string {
return "k8s"
}
func (w *K8sWatcher) Start(ctx context.Context, handler RoutesHandler) error {
w.routesHandler = handler
clientset, err := kubernetes.NewForConfig(w.config)
if err != nil {
return errors.Wrap(err, "Could not create kube clientset")
}
@ -74,7 +88,7 @@ func (w *k8sWatcherImpl) startWithLoadedConfig(ctx context.Context, config *rest
ListerWatcher: cache.NewListWatchFromClient(
clientset.CoreV1().RESTClient(),
string(core.ResourceServices),
core.NamespaceAll,
w.namespace,
fields.Everything(),
),
ObjectType: &core.Service{},
@ -87,51 +101,22 @@ func (w *k8sWatcherImpl) startWithLoadedConfig(ctx context.Context, config *rest
go serviceController.RunWithContext(ctx)
w.mappings = make(map[string]string)
if autoScaleUp || autoScaleDown {
_, statefulSetController := cache.NewInformer(
cache.NewListWatchFromClient(
if w.autoScaleUp || w.autoScaleDown {
_, statefulSetController := cache.NewInformerWithOptions(cache.InformerOptions{
ListerWatcher: cache.NewListWatchFromClient(
clientset.AppsV1().RESTClient(),
"statefulSets",
core.NamespaceAll,
w.namespace,
fields.Everything(),
),
&apps.StatefulSet{},
0,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
statefulSet, ok := obj.(*apps.StatefulSet)
if !ok {
return
}
w.RLock()
defer w.RUnlock()
w.mappings[statefulSet.Spec.ServiceName] = statefulSet.Name
},
DeleteFunc: func(obj interface{}) {
statefulSet, ok := obj.(*apps.StatefulSet)
if !ok {
return
}
w.RLock()
defer w.RUnlock()
delete(w.mappings, statefulSet.Spec.ServiceName)
},
UpdateFunc: func(oldObj, newObj interface{}) {
oldStatefulSet, ok := oldObj.(*apps.StatefulSet)
if !ok {
return
}
newStatefulSet, ok := newObj.(*apps.StatefulSet)
if !ok {
return
}
w.RLock()
defer w.RUnlock()
delete(w.mappings, oldStatefulSet.Spec.ServiceName)
w.mappings[newStatefulSet.Spec.ServiceName] = newStatefulSet.Name
},
ObjectType: &apps.StatefulSet{},
Handler: cache.ResourceEventHandlerFuncs{
AddFunc: w.handleAddStatefulSet(),
DeleteFunc: w.handleDeleteStatefulSet(),
UpdateFunc: w.handleUpdateStatefulSet(),
},
)
})
go statefulSetController.RunWithContext(ctx)
}
@ -139,14 +124,55 @@ func (w *k8sWatcherImpl) startWithLoadedConfig(ctx context.Context, config *rest
return nil
}
func (w *K8sWatcher) handleAddStatefulSet() func(obj interface{}) {
return func(obj interface{}) {
statefulSet, ok := obj.(*apps.StatefulSet)
if !ok {
return
}
w.RLock()
defer w.RUnlock()
w.mappings[statefulSet.Spec.ServiceName] = statefulSet.Name
}
}
func (w *K8sWatcher) handleUpdateStatefulSet() func(oldObj interface{}, newObj interface{}) {
return func(oldObj, newObj interface{}) {
oldStatefulSet, ok := oldObj.(*apps.StatefulSet)
if !ok {
return
}
newStatefulSet, ok := newObj.(*apps.StatefulSet)
if !ok {
return
}
w.RLock()
defer w.RUnlock()
delete(w.mappings, oldStatefulSet.Spec.ServiceName)
w.mappings[newStatefulSet.Spec.ServiceName] = newStatefulSet.Name
}
}
func (w *K8sWatcher) handleDeleteStatefulSet() func(obj interface{}) {
return func(obj interface{}) {
statefulSet, ok := obj.(*apps.StatefulSet)
if !ok {
return
}
w.RLock()
defer w.RUnlock()
delete(w.mappings, statefulSet.Spec.ServiceName)
}
}
// oldObj and newObj are expected to be *v1.Service
func (w *k8sWatcherImpl) handleUpdate(oldObj interface{}, newObj interface{}) {
func (w *K8sWatcher) handleUpdate(oldObj interface{}, newObj interface{}) {
for _, oldRoutableService := range w.extractRoutableServices(oldObj) {
logrus.WithFields(logrus.Fields{
"old": oldRoutableService,
}).Debug("UPDATE")
if oldRoutableService.externalServiceName != "" {
Routes.DeleteMapping(oldRoutableService.externalServiceName)
w.routesHandler.DeleteMapping(oldRoutableService.externalServiceName)
}
}
@ -155,40 +181,40 @@ func (w *k8sWatcherImpl) handleUpdate(oldObj interface{}, newObj interface{}) {
"new": newRoutableService,
}).Debug("UPDATE")
if newRoutableService.externalServiceName != "" {
Routes.CreateMapping(newRoutableService.externalServiceName, newRoutableService.containerEndpoint, newRoutableService.autoScaleUp, newRoutableService.autoScaleDown)
w.routesHandler.CreateMapping(newRoutableService.externalServiceName, newRoutableService.containerEndpoint, newRoutableService.autoScaleUp, newRoutableService.autoScaleDown)
} else {
Routes.SetDefaultRoute(newRoutableService.containerEndpoint)
w.routesHandler.SetDefaultRoute(newRoutableService.containerEndpoint)
}
}
}
// obj is expected to be a *v1.Service
func (w *k8sWatcherImpl) handleDelete(obj interface{}) {
func (w *K8sWatcher) handleDelete(obj interface{}) {
routableServices := w.extractRoutableServices(obj)
for _, routableService := range routableServices {
if routableService != nil {
logrus.WithField("routableService", routableService).Debug("DELETE")
if routableService.externalServiceName != "" {
Routes.DeleteMapping(routableService.externalServiceName)
w.routesHandler.DeleteMapping(routableService.externalServiceName)
} else {
Routes.SetDefaultRoute("")
w.routesHandler.SetDefaultRoute("")
}
}
}
}
// obj is expected to be a *v1.Service
func (w *k8sWatcherImpl) handleAdd(obj interface{}) {
func (w *K8sWatcher) handleAdd(obj interface{}) {
routableServices := w.extractRoutableServices(obj)
for _, routableService := range routableServices {
if routableService != nil {
logrus.WithField("routableService", routableService).Debug("ADD")
if routableService.externalServiceName != "" {
Routes.CreateMapping(routableService.externalServiceName, routableService.containerEndpoint, routableService.autoScaleUp, routableService.autoScaleDown)
w.routesHandler.CreateMapping(routableService.externalServiceName, routableService.containerEndpoint, routableService.autoScaleUp, routableService.autoScaleDown)
} else {
Routes.SetDefaultRoute(routableService.containerEndpoint)
w.routesHandler.SetDefaultRoute(routableService.containerEndpoint)
}
}
}
@ -202,7 +228,7 @@ type routableService struct {
}
// obj is expected to be a *v1.Service
func (w *k8sWatcherImpl) extractRoutableServices(obj interface{}) []*routableService {
func (w *K8sWatcher) extractRoutableServices(obj interface{}) []*routableService {
service, ok := obj.(*core.Service)
if !ok {
return nil
@ -222,7 +248,7 @@ func (w *k8sWatcherImpl) extractRoutableServices(obj interface{}) []*routableSer
return nil
}
func (w *k8sWatcherImpl) buildDetails(service *core.Service, externalServiceName string) *routableService {
func (w *K8sWatcher) buildDetails(service *core.Service, externalServiceName string) *routableService {
clusterIp := service.Spec.ClusterIP
if service.Spec.Type == core.ServiceTypeExternalName {
clusterIp = service.Spec.ExternalName
@ -252,7 +278,7 @@ func (w *k8sWatcherImpl) buildDetails(service *core.Service, externalServiceName
return rs
}
func (w *k8sWatcherImpl) buildScaleFunction(service *core.Service, from int32, to int32) ScalerFunc {
func (w *K8sWatcher) buildScaleFunction(service *core.Service, from int32, to int32) ScalerFunc {
if from <= to && !w.autoScaleUp {
return nil
}

View File

@ -3,6 +3,7 @@ package server
import (
"context"
"encoding/json"
"github.com/stretchr/testify/mock"
"testing"
"time"
@ -11,10 +12,50 @@ import (
v1 "k8s.io/api/core/v1"
)
type MockedRoutesHandler struct {
mock.Mock
routes map[string]string
defaultBackend string
}
func (m *MockedRoutesHandler) GetBackendForServer(server string) string {
backend, exists := m.routes[server]
if exists {
return backend
} else {
return m.defaultBackend
}
}
func (m *MockedRoutesHandler) CreateMapping(serverAddress string, backend string, waker ScalerFunc, sleeper ScalerFunc) {
m.MethodCalled("CreateMapping", serverAddress, backend, waker, sleeper)
if m.routes == nil {
m.routes = make(map[string]string)
}
m.routes[serverAddress] = backend
}
func (m *MockedRoutesHandler) SetDefaultRoute(backend string) {
m.MethodCalled("SetDefaultRoute", backend)
if m.routes == nil {
m.routes = make(map[string]string)
}
m.defaultBackend = backend
}
func (m *MockedRoutesHandler) DeleteMapping(serverAddress string) bool {
args := m.MethodCalled("DeleteMapping", serverAddress)
if m.routes == nil {
m.routes = make(map[string]string)
}
delete(m.routes, serverAddress)
return args.Bool(0)
}
func TestK8sWatcherImpl_handleAddThenUpdate(t *testing.T) {
type scenario struct {
given string
expect string
server string
backend string
}
type svcAndScenarios struct {
svc string
@ -30,15 +71,15 @@ func TestK8sWatcherImpl_handleAddThenUpdate(t *testing.T) {
initial: svcAndScenarios{
svc: ` {"metadata": {"annotations": {"mc-router.itzg.me/externalServerName": "a.com"}}, "spec":{"clusterIP": "1.1.1.1"}}`,
scenarios: []scenario{
{given: "a.com", expect: "1.1.1.1:25565"},
{given: "b.com", expect: ""},
{server: "a.com", backend: "1.1.1.1:25565"},
{server: "b.com", backend: ""},
},
},
update: svcAndScenarios{
svc: ` {"metadata": {"annotations": {"mc-router.itzg.me/externalServerName": "b.com"}}, "spec":{"clusterIP": "1.1.1.1"}}`,
scenarios: []scenario{
{given: "a.com", expect: ""},
{given: "b.com", expect: "1.1.1.1:25565"},
{server: "a.com", backend: ""},
{server: "b.com", backend: "1.1.1.1:25565"},
},
},
},
@ -47,15 +88,15 @@ func TestK8sWatcherImpl_handleAddThenUpdate(t *testing.T) {
initial: svcAndScenarios{
svc: ` {"metadata": {"annotations": {"mc-router.itzg.me/externalServerName": "a.com"}}, "spec":{"clusterIP": "1.1.1.1"}}`,
scenarios: []scenario{
{given: "a.com", expect: "1.1.1.1:25565"},
{given: "b.com", expect: ""},
{server: "a.com", backend: "1.1.1.1:25565"},
{server: "b.com", backend: ""},
},
},
update: svcAndScenarios{
svc: ` {"metadata": {"annotations": {"mc-router.itzg.me/externalServerName": "a.com,b.com"}}, "spec":{"clusterIP": "1.1.1.1"}}`,
scenarios: []scenario{
{given: "a.com", expect: "1.1.1.1:25565"},
{given: "b.com", expect: "1.1.1.1:25565"},
{server: "a.com", backend: "1.1.1.1:25565"},
{server: "b.com", backend: "1.1.1.1:25565"},
},
},
},
@ -64,15 +105,15 @@ func TestK8sWatcherImpl_handleAddThenUpdate(t *testing.T) {
initial: svcAndScenarios{
svc: ` {"metadata": {"annotations": {"mc-router.itzg.me/externalServerName": "a.com,b.com"}}, "spec":{"clusterIP": "1.1.1.1"}}`,
scenarios: []scenario{
{given: "a.com", expect: "1.1.1.1:25565"},
{given: "b.com", expect: "1.1.1.1:25565"},
{server: "a.com", backend: "1.1.1.1:25565"},
{server: "b.com", backend: "1.1.1.1:25565"},
},
},
update: svcAndScenarios{
svc: ` {"metadata": {"annotations": {"mc-router.itzg.me/externalServerName": "b.com"}}, "spec":{"clusterIP": "1.1.1.1"}}`,
scenarios: []scenario{
{given: "a.com", expect: ""},
{given: "b.com", expect: "1.1.1.1:25565"},
{server: "a.com", backend: ""},
{server: "b.com", backend: "1.1.1.1:25565"},
},
},
},
@ -81,17 +122,22 @@ func TestK8sWatcherImpl_handleAddThenUpdate(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
// DownScaler needs to be instantiated
DownScaler = NewDownScaler(context.Background(), false, 1*time.Second)
Routes.Reset()
watcher := &k8sWatcherImpl{}
routesHandler := new(MockedRoutesHandler)
routesHandler.On("CreateMapping", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return()
routesHandler.On("DeleteMapping", mock.Anything).Return(true)
watcher := &K8sWatcher{
routesHandler: routesHandler,
}
initialSvc := v1.Service{}
err := json.Unmarshal([]byte(test.initial.svc), &initialSvc)
require.NoError(t, err)
watcher.handleAdd(&initialSvc)
for _, s := range test.initial.scenarios {
backend, _, _, _ := Routes.FindBackendForServerAddress(context.Background(), s.given)
assert.Equal(t, s.expect, backend, "initial: given=%s", s.given)
backend := routesHandler.GetBackendForServer(s.server)
assert.Equal(t, s.backend, backend, "initial: given=%s", s.server)
}
updatedSvc := v1.Service{}
@ -100,8 +146,8 @@ func TestK8sWatcherImpl_handleAddThenUpdate(t *testing.T) {
watcher.handleUpdate(&initialSvc, &updatedSvc)
for _, s := range test.update.scenarios {
backend, _, _, _ := Routes.FindBackendForServerAddress(context.Background(), s.given)
assert.Equal(t, s.expect, backend, "update: given=%s", s.given)
backend := routesHandler.GetBackendForServer(s.server)
assert.Equal(t, s.backend, backend, "update: given=%s", s.server)
}
})
}
@ -109,8 +155,8 @@ func TestK8sWatcherImpl_handleAddThenUpdate(t *testing.T) {
func TestK8sWatcherImpl_handleAddThenDelete(t *testing.T) {
type scenario struct {
given string
expect string
server string
backend string
}
type svcAndScenarios struct {
svc string
@ -119,20 +165,21 @@ func TestK8sWatcherImpl_handleAddThenDelete(t *testing.T) {
tests := []struct {
name string
initial svcAndScenarios
delete []scenario
// non-empty `backend` in this case means the server is expected to be deleted
delete []scenario
}{
{
name: "single",
initial: svcAndScenarios{
svc: ` {"metadata": {"annotations": {"mc-router.itzg.me/externalServerName": "a.com"}}, "spec":{"clusterIP": "1.1.1.1"}}`,
scenarios: []scenario{
{given: "a.com", expect: "1.1.1.1:25565"},
{given: "b.com", expect: ""},
{server: "a.com", backend: "1.1.1.1:25565"},
{server: "b.com", backend: ""},
},
},
delete: []scenario{
{given: "a.com", expect: ""},
{given: "b.com", expect: ""},
{server: "a.com", backend: ""},
{server: "b.com", backend: ""},
},
},
{
@ -140,13 +187,13 @@ func TestK8sWatcherImpl_handleAddThenDelete(t *testing.T) {
initial: svcAndScenarios{
svc: ` {"metadata": {"annotations": {"mc-router.itzg.me/externalServerName": "a.com,b.com"}}, "spec":{"clusterIP": "1.1.1.1"}}`,
scenarios: []scenario{
{given: "a.com", expect: "1.1.1.1:25565"},
{given: "b.com", expect: "1.1.1.1:25565"},
{server: "a.com", backend: "1.1.1.1:25565"},
{server: "b.com", backend: "1.1.1.1:25565"},
},
},
delete: []scenario{
{given: "a.com", expect: ""},
{given: "b.com", expect: ""},
{server: "a.com", backend: ""},
{server: "b.com", backend: ""},
},
},
}
@ -154,23 +201,28 @@ func TestK8sWatcherImpl_handleAddThenDelete(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
// DownScaler needs to be instantiated
DownScaler = NewDownScaler(context.Background(), false, 1*time.Second)
Routes.Reset()
watcher := &k8sWatcherImpl{}
routesHandler := new(MockedRoutesHandler)
routesHandler.On("CreateMapping", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return()
routesHandler.On("DeleteMapping", mock.Anything).Return(true)
watcher := &K8sWatcher{
routesHandler: routesHandler,
}
initialSvc := v1.Service{}
err := json.Unmarshal([]byte(test.initial.svc), &initialSvc)
require.NoError(t, err)
watcher.handleAdd(&initialSvc)
for _, s := range test.initial.scenarios {
backend, _, _, _ := Routes.FindBackendForServerAddress(context.Background(), s.given)
assert.Equal(t, s.expect, backend, "initial: given=%s", s.given)
backend := routesHandler.GetBackendForServer(s.server)
assert.Equal(t, s.backend, backend, "initial: given=%s", s.server)
}
watcher.handleDelete(&initialSvc)
for _, s := range test.delete {
backend, _, _, _ := Routes.FindBackendForServerAddress(context.Background(), s.given)
assert.Equal(t, s.expect, backend, "update: given=%s", s.given)
backend := routesHandler.GetBackendForServer(s.server)
assert.Equal(t, s.backend, backend, "update: given=%s", s.server)
}
})
}
@ -178,8 +230,8 @@ func TestK8sWatcherImpl_handleAddThenDelete(t *testing.T) {
func TestK8s_externalName(t *testing.T) {
type scenario struct {
given string
expect string
server string
backend string
}
type svcAndScenarios struct {
svc string
@ -195,15 +247,15 @@ func TestK8s_externalName(t *testing.T) {
initial: svcAndScenarios{
svc: ` {"metadata": {"annotations": {"mc-router.itzg.me/externalServerName": "a.com"}}, "spec":{"type":"ExternalName", "externalName": "mc-server.com"}}`,
scenarios: []scenario{
{given: "a.com", expect: "mc-server.com:25565"},
{given: "b.com", expect: ""},
{server: "a.com", backend: "mc-server.com:25565"},
{server: "b.com", backend: ""},
},
},
update: svcAndScenarios{
svc: ` {"metadata": {"annotations": {"mc-router.itzg.me/externalServerName": "a.com"}}, "spec":{"clusterIP": "1.1.1.1"}}`,
scenarios: []scenario{
{given: "a.com", expect: "1.1.1.1:25565"},
{given: "b.com", expect: ""},
{server: "a.com", backend: "1.1.1.1:25565"},
{server: "b.com", backend: ""},
},
},
},
@ -212,15 +264,15 @@ func TestK8s_externalName(t *testing.T) {
initial: svcAndScenarios{
svc: ` {"metadata": {"annotations": {"mc-router.itzg.me/externalServerName": "a.com"}}, "spec":{"type":"ExternalName", "externalName": "mc-server.com"}}`,
scenarios: []scenario{
{given: "a.com", expect: "mc-server.com:25565"},
{given: "b.com", expect: ""},
{server: "a.com", backend: "mc-server.com:25565"},
{server: "b.com", backend: ""},
},
},
update: svcAndScenarios{
svc: ` {"metadata": {"annotations": {"mc-router.itzg.me/externalServerName": "b.com"}}, "spec":{"clusterIP": "1.1.1.1"}}`,
scenarios: []scenario{
{given: "a.com", expect: ""},
{given: "b.com", expect: "1.1.1.1:25565"},
{server: "a.com", backend: ""},
{server: "b.com", backend: "1.1.1.1:25565"},
},
},
},
@ -229,15 +281,15 @@ func TestK8s_externalName(t *testing.T) {
initial: svcAndScenarios{
svc: ` {"metadata": {"annotations": {"mc-router.itzg.me/externalServerName": "a.com,b.com"}}, "spec":{"type":"ExternalName", "externalName": "mc-server.com"}}`,
scenarios: []scenario{
{given: "a.com", expect: "mc-server.com:25565"},
{given: "b.com", expect: "mc-server.com:25565"},
{server: "a.com", backend: "mc-server.com:25565"},
{server: "b.com", backend: "mc-server.com:25565"},
},
},
update: svcAndScenarios{
svc: ` {"metadata": {"annotations": {"mc-router.itzg.me/externalServerName": "a.com,b.com"}}, "spec":{"type":"ExternalName", "externalName": "1.1.1.1"}}`,
scenarios: []scenario{
{given: "a.com", expect: "1.1.1.1:25565"},
{given: "b.com", expect: "1.1.1.1:25565"},
{server: "a.com", backend: "1.1.1.1:25565"},
{server: "b.com", backend: "1.1.1.1:25565"},
},
},
},
@ -246,17 +298,22 @@ func TestK8s_externalName(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
// DownScaler needs to be instantiated
DownScaler = NewDownScaler(context.Background(), false, 1*time.Second)
Routes.Reset()
watcher := &k8sWatcherImpl{}
routesHandler := new(MockedRoutesHandler)
routesHandler.On("CreateMapping", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return()
routesHandler.On("DeleteMapping", mock.Anything).Return(true)
watcher := &K8sWatcher{
routesHandler: routesHandler,
}
initialSvc := v1.Service{}
err := json.Unmarshal([]byte(test.initial.svc), &initialSvc)
require.NoError(t, err)
watcher.handleAdd(&initialSvc)
for _, s := range test.initial.scenarios {
backend, _, _, _ := Routes.FindBackendForServerAddress(context.Background(), s.given)
assert.Equal(t, s.expect, backend, "initial: given=%s", s.given)
backend := routesHandler.GetBackendForServer(s.server)
assert.Equal(t, s.backend, backend, "initial: given=%s", s.server)
}
updatedSvc := v1.Service{}
@ -265,8 +322,8 @@ func TestK8s_externalName(t *testing.T) {
watcher.handleUpdate(&initialSvc, &updatedSvc)
for _, s := range test.update.scenarios {
backend, _, _, _ := Routes.FindBackendForServerAddress(context.Background(), s.given)
assert.Equal(t, s.expect, backend, "update: given=%s", s.given)
backend := routesHandler.GetBackendForServer(s.server)
assert.Equal(t, s.backend, backend, "update: given=%s", s.server)
}
})
}

View File

@ -15,7 +15,23 @@ var EmptyScalerFunc = func(ctx context.Context) error { return nil }
var tcpShieldPattern = regexp.MustCompile("///.*")
// RouteFinder implementations find new routes in the system that can be tracked by a RoutesHandler
type RouteFinder interface {
	// Start begins discovering routes and reports them to handler.
	// NOTE(review): the server invokes Start once per finder at startup and
	// checks the error immediately, so implementations presumably launch
	// background watching and return promptly — confirm per implementation.
	Start(ctx context.Context, handler RoutesHandler) error
	// String returns a human-readable identifier for this finder; it is
	// used when reporting Start failures.
	String() string
}
// RoutesHandler receives route registrations and removals discovered by a
// RouteFinder.
type RoutesHandler interface {
	// CreateMapping registers backend as the target for connections whose
	// requested external hostname is serverAddress. The sleeper function is
	// invoked to shut down the serverAddress backend; waker is presumably its
	// counterpart for waking/starting it — TODO confirm against scaler usage.
	CreateMapping(serverAddress string, backend string, waker ScalerFunc, sleeper ScalerFunc)
	// SetDefaultRoute sets the backend to use when no explicit mapping
	// matches the requested server address.
	SetDefaultRoute(backend string)
	// DeleteMapping requests that the serverAddress be removed from routes.
	// Returns true if the route existed.
	DeleteMapping(serverAddress string) bool
}
type IRoutes interface {
RoutesHandler
Reset()
RegisterAll(mappings map[string]string)
// FindBackendForServerAddress returns the host:port for the external server address, if registered.
@ -24,9 +40,6 @@ type IRoutes interface {
// The 4th value returned is an (optional) "sleeper" function which a caller must invoke to shut down serverAddress.
FindBackendForServerAddress(ctx context.Context, serverAddress string) (string, string, ScalerFunc, ScalerFunc)
GetMappings() map[string]string
DeleteMapping(serverAddress string) bool
CreateMapping(serverAddress string, backend string, waker ScalerFunc, sleeper ScalerFunc)
SetDefaultRoute(backend string)
GetDefaultRoute() string
SimplifySRV(srvEnabled bool)
}

View File

@ -120,18 +120,27 @@ func NewServer(ctx context.Context, config *Config) (*Server, error) {
StartApiServer(config.ApiBinding)
}
routeWatchers := make([]RouteFinder, 0)
if config.InKubeCluster {
err = K8sWatcher.StartInCluster(ctx, config.AutoScale.Up, config.AutoScale.Down)
k8sWatcher, err := NewK8sWatcherInCluster()
if err != nil {
return nil, fmt.Errorf("could not start in-cluster k8s integration: %w", err)
return nil, fmt.Errorf("could not create in-cluster k8s watcher: %w", err)
}
k8sWatcher.WithAutoScale(config.AutoScale.Up, config.AutoScale.Down)
k8sWatcher.WithNamespace(config.KubeNamespace)
routeWatchers = append(routeWatchers, k8sWatcher)
} else if config.KubeConfig != "" {
err := K8sWatcher.StartWithConfig(ctx, config.KubeConfig, config.AutoScale.Up, config.AutoScale.Down)
k8sWatcher, err := NewK8sWatcherWithConfig(config.KubeConfig)
if err != nil {
return nil, fmt.Errorf("could not start k8s integration with kube config: %w", err)
return nil, fmt.Errorf("could not create k8s watcher with kube config: %w", err)
}
k8sWatcher.WithAutoScale(config.AutoScale.Up, config.AutoScale.Down)
k8sWatcher.WithNamespace(config.KubeNamespace)
routeWatchers = append(routeWatchers, k8sWatcher)
}
// TODO convert to RouteFinder
if config.InDocker {
err = DockerWatcher.Start(config.DockerSocket, config.DockerTimeout, config.DockerRefreshInterval, config.AutoScale.Up, config.AutoScale.Down)
if err != nil {
@ -141,6 +150,7 @@ func NewServer(ctx context.Context, config *Config) (*Server, error) {
}
}
// TODO convert to RouteFinder
if config.InDockerSwarm {
err = DockerSwarmWatcher.Start(config.DockerSocket, config.DockerTimeout, config.DockerRefreshInterval, config.AutoScale.Up, config.AutoScale.Down)
if err != nil {
@ -150,6 +160,13 @@ func NewServer(ctx context.Context, config *Config) (*Server, error) {
}
}
for _, watcher := range routeWatchers {
err := watcher.Start(ctx, Routes)
if err != nil {
return nil, fmt.Errorf("could not start route watcher %s: %w", watcher, err)
}
}
Routes.SimplifySRV(config.SimplifySRV)
err = metricsBuilder.Start(ctx)