Mirror of https://github.com/goharbor/harbor.git (synced 2024-11-25 03:35:21 +01:00)

Commit 41a6891eab: Merge branch 'main' into token-service-empty-token-fix
@@ -8,6 +8,9 @@
 * Add date here... Add signature here...
 - Add your reason here...

+* Oct 24 2024 <yan-yw.wang@broadcom.com>
+- Refresh base image
+
 * Nov 28 2022 <jiaoya@vmware.com>
 - Refresh base image

.github/workflows/CI.yml (vendored): 20 changed lines

@@ -41,10 +41,10 @@ jobs:
       - ubuntu-latest
     timeout-minutes: 100
     steps:
-      - name: Set up Go 1.22
+      - name: Set up Go 1.23
        uses: actions/setup-go@v5
        with:
-          go-version: 1.22.3
+          go-version: 1.23.2
        id: go
      - uses: actions/checkout@v3
        with:

@@ -102,10 +102,10 @@ jobs:
       - ubuntu-latest
     timeout-minutes: 100
     steps:
-      - name: Set up Go 1.22
+      - name: Set up Go 1.23
        uses: actions/setup-go@v5
        with:
-          go-version: 1.22.3
+          go-version: 1.23.2
        id: go
      - uses: actions/checkout@v3
        with:

@@ -157,10 +157,10 @@ jobs:
       - ubuntu-latest
     timeout-minutes: 100
     steps:
-      - name: Set up Go 1.22
+      - name: Set up Go 1.23
        uses: actions/setup-go@v5
        with:
-          go-version: 1.22.3
+          go-version: 1.23.2
        id: go
      - uses: actions/checkout@v3
        with:

@@ -212,10 +212,10 @@ jobs:
       - ubuntu-latest
     timeout-minutes: 100
     steps:
-      - name: Set up Go 1.22
+      - name: Set up Go 1.23
        uses: actions/setup-go@v5
        with:
-          go-version: 1.22.3
+          go-version: 1.23.2
        id: go
      - uses: actions/checkout@v3
        with:

@@ -265,10 +265,10 @@ jobs:
       - ubuntu-latest
     timeout-minutes: 100
     steps:
-      - name: Set up Go 1.22
+      - name: Set up Go 1.23
        uses: actions/setup-go@v5
        with:
-          go-version: 1.22.3
+          go-version: 1.23.2
        id: go
      - uses: actions/checkout@v3
        with:
.github/workflows/build-package.yml (vendored): 20 changed lines

@@ -26,7 +26,7 @@ jobs:
      - name: Set up Go 1.22
        uses: actions/setup-go@v5
        with:
-          go-version: 1.22.3
+          go-version: 1.23.2
        id: go
      - name: Setup Docker
        uses: docker-practice/actions-setup-docker@master

@@ -96,23 +96,23 @@ jobs:
          echo "Package name is: $harbor_offline_build_bundle"
          echo "Package name is: $harbor_online_build_bundle"

-          echo -en "${{ secrets.HARBOR_SIGN_KEY }}" | gpg --import
-          gpg -v -ab -u ${{ secrets.HARBOR_SIGN_KEY_ID }} $harbor_offline_build_bundle
-          gpg -v -ab -u ${{ secrets.HARBOR_SIGN_KEY_ID }} $harbor_online_build_bundle
+          # echo -en "${{ secrets.HARBOR_SIGN_KEY }}" | gpg --import
+          # gpg -v -ab -u ${{ secrets.HARBOR_SIGN_KEY_ID }} $harbor_offline_build_bundle
+          # gpg -v -ab -u ${{ secrets.HARBOR_SIGN_KEY_ID }} $harbor_online_build_bundle

          source tests/ci/build_util.sh
          cp ${harbor_offline_build_bundle} harbor-offline-installer-latest.tgz
-          cp ${harbor_offline_build_bundle}.asc harbor-offline-installer-latest.tgz.asc
+          # cp ${harbor_offline_build_bundle}.asc harbor-offline-installer-latest.tgz.asc
          cp ${harbor_online_build_bundle} harbor-online-installer-latest.tgz
-          cp ${harbor_online_build_bundle}.asc harbor-online-installer-latest.tgz.asc
+          # cp ${harbor_online_build_bundle}.asc harbor-online-installer-latest.tgz.asc
          uploader ${harbor_offline_build_bundle} $harbor_target_bucket
-          uploader ${harbor_offline_build_bundle}.asc $harbor_target_bucket
+          # uploader ${harbor_offline_build_bundle}.asc $harbor_target_bucket
          uploader ${harbor_online_build_bundle} $harbor_target_bucket
-          uploader ${harbor_online_build_bundle}.asc $harbor_target_bucket
+          # uploader ${harbor_online_build_bundle}.asc $harbor_target_bucket
          uploader harbor-offline-installer-latest.tgz $harbor_target_bucket
-          uploader harbor-offline-installer-latest.tgz.asc $harbor_target_bucket
+          # uploader harbor-offline-installer-latest.tgz.asc $harbor_target_bucket
          uploader harbor-online-installer-latest.tgz $harbor_target_bucket
-          uploader harbor-online-installer-latest.tgz.asc $harbor_target_bucket
+          # uploader harbor-online-installer-latest.tgz.asc $harbor_target_bucket
          echo "BUILD_BUNDLE=$harbor_offline_build_bundle" >> $GITHUB_ENV

          publishImage $target_branch $Harbor_Assets_Version "${{ secrets.DOCKER_HUB_USERNAME }}" "${{ secrets.DOCKER_HUB_PASSWORD }}"
.github/workflows/conformance_test.yml (vendored): 2 changed lines

@@ -28,7 +28,7 @@ jobs:
      - name: Set up Go 1.21
        uses: actions/setup-go@v5
        with:
-          go-version: 1.22.3
+          go-version: 1.23.2
        id: go
      - uses: actions/checkout@v3
        with:
@@ -142,30 +142,31 @@ The folder graph below shows the structure of the source code folder `harbor/src`
 #### Go
 Harbor backend is written in [Go](http://golang.org/). If you don't have a Harbor backend service development environment, please [set one up](https://golang.org/doc/install).

 | Harbor | Requires Go |
-|----------|---------------|
+|--------|-------------|
 | 1.1 | 1.7.3 |
 | 1.2 | 1.7.3 |
 | 1.3 | 1.9.2 |
 | 1.4 | 1.9.2 |
 | 1.5 | 1.9.2 |
 | 1.6 | 1.9.2 |
 | 1.7 | 1.9.2 |
 | 1.8 | 1.11.2 |
 | 1.9 | 1.12.12 |
 | 1.10 | 1.12.12 |
 | 2.0 | 1.13.15 |
 | 2.1 | 1.14.13 |
 | 2.2 | 1.15.6 |
 | 2.3 | 1.15.12 |
 | 2.4 | 1.17.7 |
 | 2.5 | 1.17.7 |
 | 2.6 | 1.18.6 |
 | 2.7 | 1.19.4 |
 | 2.8 | 1.20.6 |
 | 2.9 | 1.21.3 |
 | 2.10 | 1.21.8 |
 | 2.11 | 1.22.3 |
+| 2.12 | 1.23.2 |


 Ensure your GOPATH and PATH have been configured in accordance with the Go environment instructions.
Makefile: 12 changed lines

@@ -104,8 +104,8 @@ PREPARE_VERSION_NAME=versions

 #versions
 REGISTRYVERSION=v2.8.3-patch-redis
-TRIVYVERSION=v0.51.2
-TRIVYADAPTERVERSION=v0.31.2
+TRIVYVERSION=v0.56.1
+TRIVYADAPTERVERSION=v0.32.0-rc.1

 # version of registry for pulling the source code
 REGISTRY_SRC_TAG=v2.8.3

@@ -115,7 +115,7 @@ DISTRIBUTION_SRC=https://github.com/distribution/distribution.git
 # dependency binaries
 REGISTRYURL=https://storage.googleapis.com/harbor-builds/bin/registry/release-${REGISTRYVERSION}/registry
 TRIVY_DOWNLOAD_URL=https://github.com/aquasecurity/trivy/releases/download/$(TRIVYVERSION)/trivy_$(TRIVYVERSION:v%=%)_Linux-64bit.tar.gz
-TRIVY_ADAPTER_DOWNLOAD_URL=https://github.com/aquasecurity/harbor-scanner-trivy/releases/download/$(TRIVYADAPTERVERSION)/harbor-scanner-trivy_$(TRIVYADAPTERVERSION:v%=%)_Linux_x86_64.tar.gz
+TRIVY_ADAPTER_DOWNLOAD_URL=https://github.com/goharbor/harbor-scanner-trivy/archive/refs/tags/$(TRIVYADAPTERVERSION).tar.gz

 define VERSIONS_FOR_PREPARE
 VERSION_TAG: $(VERSIONTAG)
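Note on the Make pattern substitution above: `$(TRIVYVERSION:v%=%)` strips the leading `v` from the tag, while the new adapter URL uses the tag unchanged because it now points at a goharbor source archive instead of a prebuilt release binary. A minimal Python sketch, for illustration only (the values are just the versions set above), of how the two URLs expand:

# Illustration only: how the Makefile download URLs expand for the versions above.
trivy_version = "v0.56.1"
trivy_adapter_version = "v0.32.0-rc.1"

# $(TRIVYVERSION:v%=%) in Make strips the leading "v" prefix.
trivy_url = ("https://github.com/aquasecurity/trivy/releases/download/"
             f"{trivy_version}/trivy_{trivy_version.removeprefix('v')}_Linux-64bit.tar.gz")

# The adapter URL now uses the tag as-is and fetches a source archive.
trivy_adapter_url = ("https://github.com/goharbor/harbor-scanner-trivy/archive/refs/tags/"
                     f"{trivy_adapter_version}.tar.gz")

print(trivy_url)
print(trivy_adapter_url)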
@@ -142,7 +142,7 @@ GOINSTALL=$(GOCMD) install
 GOTEST=$(GOCMD) test
 GODEP=$(GOTEST) -i
 GOFMT=gofmt -w
-GOBUILDIMAGE=golang:1.22.3
+GOBUILDIMAGE=golang:1.23.2
 GOBUILDPATHINCONTAINER=/harbor

 # go build

@@ -282,7 +282,7 @@ endef

 # lint swagger doc
 SPECTRAL_IMAGENAME=$(IMAGENAMESPACE)/spectral
-SPECTRAL_VERSION=v6.1.0
+SPECTRAL_VERSION=v6.11.1
 SPECTRAL_IMAGE_BUILD_CMD=${DOCKERBUILD} -f ${TOOLSPATH}/spectral/Dockerfile --build-arg GOLANG=${GOBUILDIMAGE} --build-arg SPECTRAL_VERSION=${SPECTRAL_VERSION} -t ${SPECTRAL_IMAGENAME}:$(SPECTRAL_VERSION) .
 SPECTRAL=$(RUNCONTAINER) $(SPECTRAL_IMAGENAME):$(SPECTRAL_VERSION)


@@ -312,7 +312,7 @@ gen_apis: lint_apis


 MOCKERY_IMAGENAME=$(IMAGENAMESPACE)/mockery
-MOCKERY_VERSION=v2.43.2
+MOCKERY_VERSION=v2.46.2
 MOCKERY=$(RUNCONTAINER)/src ${MOCKERY_IMAGENAME}:${MOCKERY_VERSION}
 MOCKERY_IMAGE_BUILD_CMD=${DOCKERBUILD} -f ${TOOLSPATH}/mockery/Dockerfile --build-arg GOLANG=${GOBUILDIMAGE} --build-arg MOCKERY_VERSION=${MOCKERY_VERSION} -t ${MOCKERY_IMAGENAME}:$(MOCKERY_VERSION) .

@@ -48,7 +48,7 @@ harbor_admin_password: Harbor12345

 # Harbor DB configuration
 database:
-  # The password for the root user of Harbor DB. Change this before any production use.
+  # The password for the user('postgres' by default) of Harbor DB. Change this before any production use.
   password: root123
   # The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
   max_idle_conns: 100

@@ -174,7 +174,7 @@ log:
 #     port: 5140

 #This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
-_version: 2.11.0
+_version: 2.12.0

 # Uncomment external_database if using external database.
 # external_database:
@@ -10,7 +10,7 @@ from migrations import accept_versions
 @click.command()
 @click.option('-i', '--input', 'input_', required=True, help="The path of original config file")
 @click.option('-o', '--output', default='', help="the path of output config file")
-@click.option('-t', '--target', default='2.11.0', help="target version of input path")
+@click.option('-t', '--target', default='2.12.0', help="target version of input path")
 def migrate(input_, output, target):
     """
     migrate command will migrate config file style to specific version
@@ -2,4 +2,4 @@ import os

 MIGRATION_BASE_DIR = os.path.dirname(__file__)

-accept_versions = {'1.9.0', '1.10.0', '2.0.0', '2.1.0', '2.2.0', '2.3.0', '2.4.0', '2.5.0', '2.6.0', '2.7.0', '2.8.0', '2.9.0','2.10.0', '2.11.0'}
+accept_versions = {'1.9.0', '1.10.0', '2.0.0', '2.1.0', '2.2.0', '2.3.0', '2.4.0', '2.5.0', '2.6.0', '2.7.0', '2.8.0', '2.9.0','2.10.0', '2.11.0', '2.12.0'}
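The accept_versions set is what the migrator uses to reject target versions it cannot produce, so registering 2.12.0 here is what makes the new default --target above usable. A rough sketch of that gate, with illustrative names rather than Harbor's actual helper code:

# Hypothetical sketch of the version gate; names are illustrative, not Harbor's code.
accept_versions = {'1.9.0', '1.10.0', '2.0.0', '2.1.0', '2.2.0', '2.3.0', '2.4.0', '2.5.0',
                   '2.6.0', '2.7.0', '2.8.0', '2.9.0', '2.10.0', '2.11.0', '2.12.0'}

def check_target(target: str) -> None:
    # A target missing from accept_versions has no migration module to render it.
    if target not in accept_versions:
        raise SystemExit(f"unsupported target version: {target}")

check_target('2.12.0')  # passes only once 2.12.0 is registered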
make/photon/prepare/migrations/version_2_12_0/__init__.py (new file): 21 added lines

@@ -0,0 +1,21 @@
+import os
+from jinja2 import Environment, FileSystemLoader, StrictUndefined, select_autoescape
+from utils.migration import read_conf
+
+revision = '2.12.0'
+down_revisions = ['2.11.0']
+
+def migrate(input_cfg, output_cfg):
+    current_dir = os.path.dirname(__file__)
+    tpl = Environment(
+        loader=FileSystemLoader(current_dir),
+        undefined=StrictUndefined,
+        trim_blocks=True,
+        lstrip_blocks=True,
+        autoescape = select_autoescape()
+        ).get_template('harbor.yml.jinja')
+
+    config_dict = read_conf(input_cfg)
+
+    with open(output_cfg, 'w') as f:
+        f.write(tpl.render(**config_dict))
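The new module follows the same shape as the earlier version_x_y_z packages: revision names the version it produces, down_revisions lists the versions it can migrate from, and migrate renders harbor.yml.jinja with the parsed config. A small usage sketch with placeholder paths (in practice the migrator CLI walks from the file's _version up to --target rather than calling a module directly):

# Example only; the input/output paths below are placeholders.
from migrations.version_2_12_0 import migrate, revision, down_revisions

print(revision)        # '2.12.0'
print(down_revisions)  # ['2.11.0']

# Render a 2.11.0-style harbor.yml into the 2.12.0 layout.
migrate('/tmp/harbor.yml', '/tmp/harbor.yml.2.12.0')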
make/photon/prepare/migrations/version_2_12_0/harbor.yml.jinja (new file): 737 added lines

@@ -0,0 +1,737 @@
|
|||||||
|
# Configuration file of Harbor
|
||||||
|
|
||||||
|
# The IP address or hostname to access admin UI and registry service.
|
||||||
|
# DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
|
||||||
|
hostname: {{ hostname }}
|
||||||
|
|
||||||
|
# http related config
|
||||||
|
{% if http is defined %}
|
||||||
|
http:
|
||||||
|
# port for http, default is 80. If https enabled, this port will redirect to https port
|
||||||
|
port: {{ http.port }}
|
||||||
|
{% else %}
|
||||||
|
# http:
|
||||||
|
# # port for http, default is 80. If https enabled, this port will redirect to https port
|
||||||
|
# port: 80
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% if https is defined %}
|
||||||
|
# https related config
|
||||||
|
https:
|
||||||
|
# https port for harbor, default is 443
|
||||||
|
port: {{ https.port }}
|
||||||
|
# The path of cert and key files for nginx
|
||||||
|
certificate: {{ https.certificate }}
|
||||||
|
private_key: {{ https.private_key }}
|
||||||
|
# enable strong ssl ciphers (default: false)
|
||||||
|
{% if strong_ssl_ciphers is defined %}
|
||||||
|
strong_ssl_ciphers: {{ strong_ssl_ciphers | lower }}
|
||||||
|
{% else %}
|
||||||
|
strong_ssl_ciphers: false
|
||||||
|
{% endif %}
|
||||||
|
{% else %}
|
||||||
|
# https related config
|
||||||
|
# https:
|
||||||
|
# # https port for harbor, default is 443
|
||||||
|
# port: 443
|
||||||
|
# # The path of cert and key files for nginx
|
||||||
|
# certificate: /your/certificate/path
|
||||||
|
# private_key: /your/private/key/path
|
||||||
|
# enable strong ssl ciphers (default: false)
|
||||||
|
# strong_ssl_ciphers: false
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
# # Harbor will set ipv4 enabled only by default if this block is not configured
|
||||||
|
# # Otherwise, please uncomment this block to configure your own ip_family stacks
|
||||||
|
{% if ip_family is defined %}
|
||||||
|
ip_family:
|
||||||
|
# ipv6Enabled set to true if ipv6 is enabled in docker network, currently it affected the nginx related component
|
||||||
|
{% if ip_family.ipv6 is defined %}
|
||||||
|
ipv6:
|
||||||
|
enabled: {{ ip_family.ipv6.enabled | lower }}
|
||||||
|
{% else %}
|
||||||
|
ipv6:
|
||||||
|
enabled: false
|
||||||
|
{% endif %}
|
||||||
|
# ipv4Enabled set to true by default, currently it affected the nginx related component
|
||||||
|
{% if ip_family.ipv4 is defined %}
|
||||||
|
ipv4:
|
||||||
|
enabled: {{ ip_family.ipv4.enabled | lower }}
|
||||||
|
{% else %}
|
||||||
|
ipv4:
|
||||||
|
enabled: true
|
||||||
|
{% endif %}
|
||||||
|
{% else %}
|
||||||
|
# ip_family:
|
||||||
|
# # ipv6Enabled set to true if ipv6 is enabled in docker network, currently it affected the nginx related component
|
||||||
|
# ipv6:
|
||||||
|
# enabled: false
|
||||||
|
# # ipv4Enabled set to true by default, currently it affected the nginx related component
|
||||||
|
# ipv4:
|
||||||
|
# enabled: true
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% if internal_tls is defined %}
|
||||||
|
# Uncomment following will enable tls communication between all harbor components
|
||||||
|
internal_tls:
|
||||||
|
# set enabled to true means internal tls is enabled
|
||||||
|
enabled: {{ internal_tls.enabled | lower }}
|
||||||
|
{% if internal_tls.dir is defined %}
|
||||||
|
# put your cert and key files on dir
|
||||||
|
dir: {{ internal_tls.dir }}
|
||||||
|
{% endif %}
|
||||||
|
{% else %}
|
||||||
|
# internal_tls:
|
||||||
|
# # set enabled to true means internal tls is enabled
|
||||||
|
# enabled: true
|
||||||
|
# # put your cert and key files on dir
|
||||||
|
# dir: /etc/harbor/tls/internal
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
# Uncomment external_url if you want to enable external proxy
|
||||||
|
# And when it enabled the hostname will no longer used
|
||||||
|
{% if external_url is defined %}
|
||||||
|
external_url: {{ external_url }}
|
||||||
|
{% else %}
|
||||||
|
# external_url: https://reg.mydomain.com:8433
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
# The initial password of Harbor admin
|
||||||
|
# It only works in first time to install harbor
|
||||||
|
# Remember Change the admin password from UI after launching Harbor.
|
||||||
|
{% if harbor_admin_password is defined %}
|
||||||
|
harbor_admin_password: {{ harbor_admin_password }}
|
||||||
|
{% else %}
|
||||||
|
harbor_admin_password: Harbor12345
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
# Harbor DB configuration
|
||||||
|
database:
|
||||||
|
{% if database is defined %}
|
||||||
|
# The password for the root user of Harbor DB. Change this before any production use.
|
||||||
|
password: {{ database.password}}
|
||||||
|
# The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
|
||||||
|
max_idle_conns: {{ database.max_idle_conns }}
|
||||||
|
# The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
|
||||||
|
# Note: the default number of connections is 1024 for postgres of harbor.
|
||||||
|
max_open_conns: {{ database.max_open_conns }}
|
||||||
|
# The maximum amount of time a connection may be reused. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's age.
|
||||||
|
# The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
||||||
|
{% if database.conn_max_lifetime is defined %}
|
||||||
|
conn_max_lifetime: {{ database.conn_max_lifetime }}
|
||||||
|
{% else %}
|
||||||
|
conn_max_lifetime: 5m
|
||||||
|
{% endif %}
|
||||||
|
# The maximum amount of time a connection may be idle. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's idle time.
|
||||||
|
# The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
||||||
|
{% if database.conn_max_idle_time is defined %}
|
||||||
|
conn_max_idle_time: {{ database.conn_max_idle_time }}
|
||||||
|
{% else %}
|
||||||
|
conn_max_idle_time: 0
|
||||||
|
{% endif %}
|
||||||
|
{% else %}
|
||||||
|
# The password for the root user of Harbor DB. Change this before any production use.
|
||||||
|
password: root123
|
||||||
|
# The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
|
||||||
|
max_idle_conns: 100
|
||||||
|
# The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
|
||||||
|
# Note: the default number of connections is 1024 for postgres of harbor.
|
||||||
|
max_open_conns: 900
|
||||||
|
# The maximum amount of time a connection may be reused. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's age.
|
||||||
|
# The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
||||||
|
conn_max_lifetime: 5m
|
||||||
|
# The maximum amount of time a connection may be idle. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's idle time.
|
||||||
|
# The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
||||||
|
conn_max_idle_time: 0
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% if data_volume is defined %}
|
||||||
|
# The default data volume
|
||||||
|
data_volume: {{ data_volume }}
|
||||||
|
{% else %}
|
||||||
|
# The default data volume
|
||||||
|
data_volume: /data
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
# Harbor Storage settings by default is using /data dir on local filesystem
|
||||||
|
# Uncomment storage_service setting If you want to using external storage
|
||||||
|
{% if storage_service is defined %}
|
||||||
|
storage_service:
|
||||||
|
{% for key, value in storage_service.items() %}
|
||||||
|
{% if key == 'ca_bundle' %}
|
||||||
|
# # ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
|
||||||
|
# # of registry's and chart repository's containers. This is usually needed when the user hosts a internal storage with self signed certificate.
|
||||||
|
ca_bundle: {{ value if value is not none else '' }}
|
||||||
|
{% elif key == 'redirect' %}
|
||||||
|
# # set disable to true when you want to disable registry redirect
|
||||||
|
redirect:
|
||||||
|
{% if storage_service.redirect.disabled is defined %}
|
||||||
|
disable: {{ storage_service.redirect.disabled | lower}}
|
||||||
|
{% else %}
|
||||||
|
disable: {{ storage_service.redirect.disable | lower}}
|
||||||
|
{% endif %}
|
||||||
|
{% else %}
|
||||||
|
# # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss
|
||||||
|
# # for more info about this configuration please refer https://distribution.github.io/distribution/about/configuration/
|
||||||
|
# # and https://distribution.github.io/distribution/storage-drivers/
|
||||||
|
{{ key }}:
|
||||||
|
{% for k, v in value.items() %}
|
||||||
|
{{ k }}: {{ v if v is not none else '' }}
|
||||||
|
{% endfor %}
|
||||||
|
{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
{% else %}
|
||||||
|
# storage_service:
|
||||||
|
# # ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
|
||||||
|
# # of registry's and chart repository's containers. This is usually needed when the user hosts a internal storage with self signed certificate.
|
||||||
|
# ca_bundle:
|
||||||
|
|
||||||
|
# # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss
|
||||||
|
# # for more info about this configuration please refer https://distribution.github.io/distribution/about/configuration/
|
||||||
|
# # and https://distribution.github.io/distribution/storage-drivers/
|
||||||
|
# filesystem:
|
||||||
|
# maxthreads: 100
|
||||||
|
# # set disable to true when you want to disable registry redirect
|
||||||
|
# redirect:
|
||||||
|
# disable: false
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
# Trivy configuration
|
||||||
|
#
|
||||||
|
# Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases.
|
||||||
|
# It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached
|
||||||
|
# in the local file system. In addition, the database contains the update timestamp so Trivy can detect whether it
|
||||||
|
# should download a newer version from the Internet or use the cached one. Currently, the database is updated every
|
||||||
|
# 12 hours and published as a new release to GitHub.
|
||||||
|
{% if trivy is defined %}
|
||||||
|
trivy:
|
||||||
|
# ignoreUnfixed The flag to display only fixed vulnerabilities
|
||||||
|
{% if trivy.ignore_unfixed is defined %}
|
||||||
|
ignore_unfixed: {{ trivy.ignore_unfixed | lower }}
|
||||||
|
{% else %}
|
||||||
|
ignore_unfixed: false
|
||||||
|
{% endif %}
|
||||||
|
# skipUpdate The flag to enable or disable Trivy DB downloads from GitHub
|
||||||
|
#
|
||||||
|
# You might want to enable this flag in test or CI/CD environments to avoid GitHub rate limiting issues.
|
||||||
|
# If the flag is enabled you have to download the `trivy-offline.tar.gz` archive manually, extract `trivy.db` and
|
||||||
|
# `metadata.json` files and mount them in the `/home/scanner/.cache/trivy/db` path.
|
||||||
|
{% if trivy.skip_update is defined %}
|
||||||
|
skip_update: {{ trivy.skip_update | lower }}
|
||||||
|
{% else %}
|
||||||
|
skip_update: false
|
||||||
|
{% endif %}
|
||||||
|
{% if trivy.skip_java_db_update is defined %}
|
||||||
|
# skipJavaDBUpdate If the flag is enabled you have to manually download the `trivy-java.db` file and mount it in the
|
||||||
|
# `/home/scanner/.cache/trivy/java-db/trivy-java.db` path
|
||||||
|
skip_java_db_update: {{ trivy.skip_java_db_update | lower }}
|
||||||
|
{% else %}
|
||||||
|
skip_java_db_update: false
|
||||||
|
{% endif %}
|
||||||
|
#
|
||||||
|
{% if trivy.offline_scan is defined %}
|
||||||
|
offline_scan: {{ trivy.offline_scan | lower }}
|
||||||
|
{% else %}
|
||||||
|
offline_scan: false
|
||||||
|
{% endif %}
|
||||||
|
#
|
||||||
|
# Comma-separated list of what security issues to detect. Possible values are `vuln`, `config` and `secret`. Defaults to `vuln`.
|
||||||
|
{% if trivy.security_check is defined %}
|
||||||
|
security_check: {{ trivy.security_check }}
|
||||||
|
{% else %}
|
||||||
|
security_check: vuln
|
||||||
|
{% endif %}
|
||||||
|
#
|
||||||
|
# insecure The flag to skip verifying registry certificate
|
||||||
|
{% if trivy.insecure is defined %}
|
||||||
|
insecure: {{ trivy.insecure | lower }}
|
||||||
|
{% else %}
|
||||||
|
insecure: false
|
||||||
|
{% endif %}
|
||||||
|
#
|
||||||
|
{% if trivy.timeout is defined %}
|
||||||
|
# timeout The duration to wait for scan completion.
|
||||||
|
# There is upper bound of 30 minutes defined in scan job. So if this `timeout` is larger than 30m0s, it will also timeout at 30m0s.
|
||||||
|
timeout: {{ trivy.timeout}}
|
||||||
|
{% else %}
|
||||||
|
timeout: 5m0s
|
||||||
|
{% endif %}
|
||||||
|
#
|
||||||
|
# github_token The GitHub access token to download Trivy DB
|
||||||
|
#
|
||||||
|
# Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
|
||||||
|
# for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
|
||||||
|
# requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
|
||||||
|
# https://developer.github.com/v3/#rate-limiting
|
||||||
|
#
|
||||||
|
# You can create a GitHub token by following the instructions in
|
||||||
|
# https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
|
||||||
|
#
|
||||||
|
{% if trivy.github_token is defined %}
|
||||||
|
github_token: {{ trivy.github_token }}
|
||||||
|
{% else %}
|
||||||
|
# github_token: xxx
|
||||||
|
{% endif %}
|
||||||
|
{% else %}
|
||||||
|
# trivy:
|
||||||
|
# # ignoreUnfixed The flag to display only fixed vulnerabilities
|
||||||
|
# ignore_unfixed: false
|
||||||
|
# # skipUpdate The flag to enable or disable Trivy DB downloads from GitHub
|
||||||
|
# #
|
||||||
|
# # You might want to enable this flag in test or CI/CD environments to avoid GitHub rate limiting issues.
|
||||||
|
# # If the flag is enabled you have to download the `trivy-offline.tar.gz` archive manually, extract `trivy.db` and
|
||||||
|
# # `metadata.json` files and mount them in the `/home/scanner/.cache/trivy/db` path.
|
||||||
|
# skip_update: false
|
||||||
|
# #
|
||||||
|
# # skipJavaDBUpdate If the flag is enabled you have to manually download the `trivy-java.db` file and mount it in the
|
||||||
|
# # `/home/scanner/.cache/trivy/java-db/trivy-java.db` path
|
||||||
|
# skip_java_db_update: false
|
||||||
|
# #
|
||||||
|
# #The offline_scan option prevents Trivy from sending API requests to identify dependencies.
|
||||||
|
# # Scanning JAR files and pom.xml may require Internet access for better detection, but this option tries to avoid it.
|
||||||
|
# # For example, the offline mode will not try to resolve transitive dependencies in pom.xml when the dependency doesn't
|
||||||
|
# # exist in the local repositories. It means a number of detected vulnerabilities might be fewer in offline mode.
|
||||||
|
# # It would work if all the dependencies are in local.
|
||||||
|
# # This option doesn’t affect DB download. You need to specify "skip-update" as well as "offline-scan" in an air-gapped environment.
|
||||||
|
# offline_scan: false
|
||||||
|
# #
|
||||||
|
# # insecure The flag to skip verifying registry certificate
|
||||||
|
# insecure: false
|
||||||
|
# # github_token The GitHub access token to download Trivy DB
|
||||||
|
# #
|
||||||
|
# # Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
|
||||||
|
# # for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
|
||||||
|
# # requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
|
||||||
|
# # https://developer.github.com/v3/#rate-limiting
|
||||||
|
# #
|
||||||
|
# # timeout The duration to wait for scan completion.
|
||||||
|
# # There is upper bound of 30 minutes defined in scan job. So if this `timeout` is larger than 30m0s, it will also timeout at 30m0s.
|
||||||
|
# timeout: 5m0s
|
||||||
|
# #
|
||||||
|
# # You can create a GitHub token by following the instructions in
|
||||||
|
# # https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
|
||||||
|
# #
|
||||||
|
# # github_token: xxx
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
jobservice:
|
||||||
|
# Maximum number of job workers in job service
|
||||||
|
{% if jobservice is defined %}
|
||||||
|
max_job_workers: {{ jobservice.max_job_workers }}
|
||||||
|
# The jobLoggers backend name, only support "STD_OUTPUT", "FILE" and/or "DB"
|
||||||
|
{% if jobservice.job_loggers is defined %}
|
||||||
|
job_loggers:
|
||||||
|
{% for job_logger in jobservice.job_loggers %}
|
||||||
|
- {{job_logger}}
|
||||||
|
{% endfor %}
|
||||||
|
{% else %}
|
||||||
|
job_loggers:
|
||||||
|
- STD_OUTPUT
|
||||||
|
- FILE
|
||||||
|
# - DB
|
||||||
|
{% endif %}
|
||||||
|
# The jobLogger sweeper duration (ignored if `jobLogger` is `stdout`)
|
||||||
|
{% if jobservice.logger_sweeper_duration is defined %}
|
||||||
|
logger_sweeper_duration: {{ jobservice.logger_sweeper_duration }}
|
||||||
|
{% else %}
|
||||||
|
logger_sweeper_duration: 1
|
||||||
|
{% endif %}
|
||||||
|
{% else %}
|
||||||
|
max_job_workers: 10
|
||||||
|
# The jobLoggers backend name, only support "STD_OUTPUT", "FILE" and/or "DB"
|
||||||
|
job_loggers:
|
||||||
|
- STD_OUTPUT
|
||||||
|
- FILE
|
||||||
|
# - DB
|
||||||
|
# The jobLogger sweeper duration (ignored if `jobLogger` is `stdout`)
|
||||||
|
logger_sweeper_duration: 1
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
notification:
|
||||||
|
# Maximum retry count for webhook job
|
||||||
|
{% if notification is defined %}
|
||||||
|
webhook_job_max_retry: {{ notification.webhook_job_max_retry}}
|
||||||
|
# HTTP client timeout for webhook job
|
||||||
|
{% if notification.webhook_job_http_client_timeout is defined %}
|
||||||
|
webhook_job_http_client_timeout: {{ notification.webhook_job_http_client_timeout }}
|
||||||
|
{% else %}
|
||||||
|
webhook_job_http_client_timeout: 3 #seconds
|
||||||
|
{% endif %}
|
||||||
|
{% else %}
|
||||||
|
webhook_job_max_retry: 3
|
||||||
|
# HTTP client timeout for webhook job
|
||||||
|
webhook_job_http_client_timeout: 3 #seconds
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
# Log configurations
|
||||||
|
log:
|
||||||
|
# options are debug, info, warning, error, fatal
|
||||||
|
{% if log is defined %}
|
||||||
|
level: {{ log.level }}
|
||||||
|
# configs for logs in local storage
|
||||||
|
local:
|
||||||
|
# Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
|
||||||
|
rotate_count: {{ log.local.rotate_count }}
|
||||||
|
# Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
|
||||||
|
# If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
|
||||||
|
# are all valid.
|
||||||
|
rotate_size: {{ log.local.rotate_size }}
|
||||||
|
# The directory on your host that store log
|
||||||
|
location: {{ log.local.location }}
|
||||||
|
{% if log.external_endpoint is defined %}
|
||||||
|
external_endpoint:
|
||||||
|
# protocol used to transmit log to external endpoint, options is tcp or udp
|
||||||
|
protocol: {{ log.external_endpoint.protocol }}
|
||||||
|
# The host of external endpoint
|
||||||
|
host: {{ log.external_endpoint.host }}
|
||||||
|
# Port of external endpoint
|
||||||
|
port: {{ log.external_endpoint.port }}
|
||||||
|
{% else %}
|
||||||
|
# Uncomment following lines to enable external syslog endpoint.
|
||||||
|
# external_endpoint:
|
||||||
|
# # protocol used to transmit log to external endpoint, options is tcp or udp
|
||||||
|
# protocol: tcp
|
||||||
|
# # The host of external endpoint
|
||||||
|
# host: localhost
|
||||||
|
# # Port of external endpoint
|
||||||
|
# port: 5140
|
||||||
|
{% endif %}
|
||||||
|
{% else %}
|
||||||
|
level: info
|
||||||
|
# configs for logs in local storage
|
||||||
|
local:
|
||||||
|
# Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
|
||||||
|
rotate_count: 50
|
||||||
|
# Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
|
||||||
|
# If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
|
||||||
|
# are all valid.
|
||||||
|
rotate_size: 200M
|
||||||
|
# The directory on your host that store log
|
||||||
|
location: /var/log/harbor
|
||||||
|
|
||||||
|
# Uncomment following lines to enable external syslog endpoint.
|
||||||
|
# external_endpoint:
|
||||||
|
# # protocol used to transmit log to external endpoint, options is tcp or udp
|
||||||
|
# protocol: tcp
|
||||||
|
# # The host of external endpoint
|
||||||
|
# host: localhost
|
||||||
|
# # Port of external endpoint
|
||||||
|
# port: 5140
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
|
||||||
|
#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
|
||||||
|
_version: 2.12.0
|
||||||
|
{% if external_database is defined %}
|
||||||
|
# Uncomment external_database if using external database.
|
||||||
|
external_database:
|
||||||
|
harbor:
|
||||||
|
host: {{ external_database.harbor.host }}
|
||||||
|
port: {{ external_database.harbor.port }}
|
||||||
|
db_name: {{ external_database.harbor.db_name }}
|
||||||
|
username: {{ external_database.harbor.username }}
|
||||||
|
password: {{ external_database.harbor.password }}
|
||||||
|
ssl_mode: {{ external_database.harbor.ssl_mode }}
|
||||||
|
max_idle_conns: {{ external_database.harbor.max_idle_conns}}
|
||||||
|
max_open_conns: {{ external_database.harbor.max_open_conns}}
|
||||||
|
{% else %}
|
||||||
|
# Uncomment external_database if using external database.
|
||||||
|
# external_database:
|
||||||
|
# harbor:
|
||||||
|
# host: harbor_db_host
|
||||||
|
# port: harbor_db_port
|
||||||
|
# db_name: harbor_db_name
|
||||||
|
# username: harbor_db_username
|
||||||
|
# password: harbor_db_password
|
||||||
|
# ssl_mode: disable
|
||||||
|
# max_idle_conns: 2
|
||||||
|
# max_open_conns: 0
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% if redis is defined %}
|
||||||
|
redis:
|
||||||
|
# # db_index 0 is for core, it's unchangeable
|
||||||
|
{% if redis.registry_db_index is defined %}
|
||||||
|
registry_db_index: {{ redis.registry_db_index }}
|
||||||
|
{% else %}
|
||||||
|
# # registry_db_index: 1
|
||||||
|
{% endif %}
|
||||||
|
{% if redis.jobservice_db_index is defined %}
|
||||||
|
jobservice_db_index: {{ redis.jobservice_db_index }}
|
||||||
|
{% else %}
|
||||||
|
# # jobservice_db_index: 2
|
||||||
|
{% endif %}
|
||||||
|
{% if redis.trivy_db_index is defined %}
|
||||||
|
trivy_db_index: {{ redis.trivy_db_index }}
|
||||||
|
{% else %}
|
||||||
|
# # trivy_db_index: 5
|
||||||
|
{% endif %}
|
||||||
|
{% if redis.harbor_db_index is defined %}
|
||||||
|
harbor_db_index: {{ redis.harbor_db_index }}
|
||||||
|
{% else %}
|
||||||
|
# # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
|
||||||
|
# # harbor_db_index: 6
|
||||||
|
{% endif %}
|
||||||
|
{% if redis.cache_layer_db_index is defined %}
|
||||||
|
cache_layer_db_index: {{ redis.cache_layer_db_index }}
|
||||||
|
{% else %}
|
||||||
|
# # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
|
||||||
|
# # cache_layer_db_index: 7
|
||||||
|
{% endif %}
|
||||||
|
{% else %}
|
||||||
|
# Uncomment redis if need to customize redis db
|
||||||
|
# redis:
|
||||||
|
# # db_index 0 is for core, it's unchangeable
|
||||||
|
# # registry_db_index: 1
|
||||||
|
# # jobservice_db_index: 2
|
||||||
|
# # trivy_db_index: 5
|
||||||
|
# # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
|
||||||
|
# # harbor_db_index: 6
|
||||||
|
# # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
|
||||||
|
# # cache_layer_db_index: 7
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% if external_redis is defined %}
|
||||||
|
external_redis:
|
||||||
|
# support redis, redis+sentinel
|
||||||
|
# host for redis: <host_redis>:<port_redis>
|
||||||
|
# host for redis+sentinel:
|
||||||
|
# <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
|
||||||
|
host: {{ external_redis.host }}
|
||||||
|
password: {{ external_redis.password }}
|
||||||
|
# Redis AUTH command was extended in Redis 6, it is possible to use it in the two-arguments AUTH <username> <password> form.
|
||||||
|
{% if external_redis.username is defined %}
|
||||||
|
username: {{ external_redis.username }}
|
||||||
|
{% else %}
|
||||||
|
# username:
|
||||||
|
{% endif %}
|
||||||
|
# sentinel_master_set must be set to support redis+sentinel
|
||||||
|
#sentinel_master_set:
|
||||||
|
# db_index 0 is for core, it's unchangeable
|
||||||
|
registry_db_index: {{ external_redis.registry_db_index }}
|
||||||
|
jobservice_db_index: {{ external_redis.jobservice_db_index }}
|
||||||
|
trivy_db_index: 5
|
||||||
|
idle_timeout_seconds: 30
|
||||||
|
{% if external_redis.harbor_db_index is defined %}
|
||||||
|
harbor_db_index: {{ redis.harbor_db_index }}
|
||||||
|
{% else %}
|
||||||
|
# # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
|
||||||
|
# # harbor_db_index: 6
|
||||||
|
{% endif %}
|
||||||
|
{% if external_redis.cache_layer_db_index is defined %}
|
||||||
|
cache_layer_db_index: {{ redis.cache_layer_db_index }}
|
||||||
|
{% else %}
|
||||||
|
# # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
|
||||||
|
# # cache_layer_db_index: 7
|
||||||
|
{% endif %}
|
||||||
|
{% else %}
|
||||||
|
# Uncomments external_redis if using external Redis server
|
||||||
|
# external_redis:
|
||||||
|
# # support redis, redis+sentinel
|
||||||
|
# # host for redis: <host_redis>:<port_redis>
|
||||||
|
# # host for redis+sentinel:
|
||||||
|
# # <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
|
||||||
|
# host: redis:6379
|
||||||
|
# password:
|
||||||
|
# # Redis AUTH command was extended in Redis 6, it is possible to use it in the two-arguments AUTH <username> <password> form.
|
||||||
|
# # username:
|
||||||
|
# # sentinel_master_set must be set to support redis+sentinel
|
||||||
|
# #sentinel_master_set:
|
||||||
|
# # db_index 0 is for core, it's unchangeable
|
||||||
|
# registry_db_index: 1
|
||||||
|
# jobservice_db_index: 2
|
||||||
|
# trivy_db_index: 5
|
||||||
|
# idle_timeout_seconds: 30
|
||||||
|
# # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
|
||||||
|
# # harbor_db_index: 6
|
||||||
|
# # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
|
||||||
|
# # cache_layer_db_index: 7
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% if uaa is defined %}
|
||||||
|
# Uncomment uaa for trusting the certificate of uaa instance that is hosted via self-signed cert.
|
||||||
|
uaa:
|
||||||
|
ca_file: {{ uaa.ca_file }}
|
||||||
|
{% else %}
|
||||||
|
# Uncomment uaa for trusting the certificate of uaa instance that is hosted via self-signed cert.
|
||||||
|
# uaa:
|
||||||
|
# ca_file: /path/to/ca
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
|
||||||
|
# Global proxy
|
||||||
|
# Config http proxy for components, e.g. http://my.proxy.com:3128
|
||||||
|
# Components doesn't need to connect to each others via http proxy.
|
||||||
|
# Remove component from `components` array if want disable proxy
|
||||||
|
# for it. If you want use proxy for replication, MUST enable proxy
|
||||||
|
# for core and jobservice, and set `http_proxy` and `https_proxy`.
|
||||||
|
# Add domain to the `no_proxy` field, when you want disable proxy
|
||||||
|
# for some special registry.
|
||||||
|
{% if proxy is defined %}
|
||||||
|
proxy:
|
||||||
|
http_proxy: {{ proxy.http_proxy or ''}}
|
||||||
|
https_proxy: {{ proxy.https_proxy or ''}}
|
||||||
|
no_proxy: {{ proxy.no_proxy or ''}}
|
||||||
|
{% if proxy.components is defined %}
|
||||||
|
components:
|
||||||
|
{% for component in proxy.components %}
|
||||||
|
{% if component != 'clair' %}
|
||||||
|
- {{component}}
|
||||||
|
{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
{% endif %}
|
||||||
|
{% else %}
|
||||||
|
proxy:
|
||||||
|
http_proxy:
|
||||||
|
https_proxy:
|
||||||
|
no_proxy:
|
||||||
|
components:
|
||||||
|
- core
|
||||||
|
- jobservice
|
||||||
|
- trivy
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% if metric is defined %}
|
||||||
|
metric:
|
||||||
|
enabled: {{ metric.enabled }}
|
||||||
|
port: {{ metric.port }}
|
||||||
|
path: {{ metric.path }}
|
||||||
|
{% else %}
|
||||||
|
# metric:
|
||||||
|
# enabled: false
|
||||||
|
# port: 9090
|
||||||
|
# path: /metrics
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
# Trace related config
|
||||||
|
# only can enable one trace provider(jaeger or otel) at the same time,
|
||||||
|
# and when using jaeger as provider, can only enable it with agent mode or collector mode.
|
||||||
|
# if using jaeger collector mode, uncomment endpoint and uncomment username, password if needed
|
||||||
|
# if using jaeger agetn mode uncomment agent_host and agent_port
|
||||||
|
{% if trace is defined %}
|
||||||
|
trace:
|
||||||
|
enabled: {{ trace.enabled | lower}}
|
||||||
|
sample_rate: {{ trace.sample_rate }}
|
||||||
|
# # namespace used to differentiate different harbor services
|
||||||
|
{% if trace.namespace is defined %}
|
||||||
|
namespace: {{ trace.namespace }}
|
||||||
|
{% else %}
|
||||||
|
# namespace:
|
||||||
|
{% endif %}
|
||||||
|
# # attributes is a key value dict contains user defined attributes used to initialize trace provider
|
||||||
|
{% if trace.attributes is defined%}
|
||||||
|
attributes:
|
||||||
|
{% for name, value in trace.attributes.items() %}
|
||||||
|
{{name}}: {{value}}
|
||||||
|
{% endfor %}
|
||||||
|
{% else %}
|
||||||
|
# attributes:
|
||||||
|
# application: harbor
|
||||||
|
{% endif %}
|
||||||
|
{% if trace.jaeger is defined%}
|
||||||
|
jaeger:
|
||||||
|
endpoint: {{trace.jaeger.endpoint or '' }}
|
||||||
|
username: {{trace.jaeger.username or ''}}
|
||||||
|
password: {{trace.jaeger.password or ''}}
|
||||||
|
agent_host: {{trace.jaeger.agent_host or ''}}
|
||||||
|
agent_port: {{trace.jaeger.agent_port or ''}}
|
||||||
|
{% else %}
|
||||||
|
# jaeger:
|
||||||
|
# endpoint:
|
||||||
|
# username:
|
||||||
|
# password:
|
||||||
|
# agent_host:
|
||||||
|
# agent_port:
|
||||||
|
{% endif %}
|
||||||
|
{% if trace. otel is defined %}
|
||||||
|
otel:
|
||||||
|
endpoint: {{trace.otel.endpoint or '' }}
|
||||||
|
url_path: {{trace.otel.url_path or '' }}
|
||||||
|
compression: {{trace.otel.compression | lower }}
|
||||||
|
insecure: {{trace.otel.insecure | lower }}
|
||||||
|
timeout: {{trace.otel.timeout or '' }}
|
||||||
|
{% else %}
|
||||||
|
# otel:
|
||||||
|
# endpoint: hostname:4318
|
||||||
|
# url_path: /v1/traces
|
||||||
|
# compression: false
|
||||||
|
# insecure: true
|
||||||
|
# # timeout is in seconds
|
||||||
|
# timeout: 10
|
||||||
|
{% endif%}
|
||||||
|
{% else %}
|
||||||
|
# trace:
|
||||||
|
# enabled: true
|
||||||
|
# # set sample_rate to 1 if you wanna sampling 100% of trace data; set 0.5 if you wanna sampling 50% of trace data, and so forth
|
||||||
|
# sample_rate: 1
|
||||||
|
# # # namespace used to differentiate different harbor services
|
||||||
|
# # namespace:
|
||||||
|
# # # attributes is a key value dict contains user defined attributes used to initialize trace provider
|
||||||
|
# # attributes:
|
||||||
|
# # application: harbor
|
||||||
|
# # jaeger:
|
||||||
|
# # endpoint: http://hostname:14268/api/traces
|
||||||
|
# # username:
|
||||||
|
# # password:
|
||||||
|
# # agent_host: hostname
|
||||||
|
# # agent_port: 6831
|
||||||
|
# # otel:
|
||||||
|
# # endpoint: hostname:4318
|
||||||
|
# # url_path: /v1/traces
|
||||||
|
# # compression: false
|
||||||
|
# # insecure: true
|
||||||
|
# # # timeout is in seconds
|
||||||
|
# # timeout: 10
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
# enable purge _upload directories
|
||||||
|
{% if upload_purging is defined %}
|
||||||
|
upload_purging:
|
||||||
|
enabled: {{ upload_purging.enabled | lower}}
|
||||||
|
age: {{ upload_purging.age }}
|
||||||
|
interval: {{ upload_purging.interval }}
|
||||||
|
dryrun: {{ upload_purging.dryrun | lower}}
|
||||||
|
{% else %}
|
||||||
|
upload_purging:
|
||||||
|
enabled: true
|
||||||
|
# remove files in _upload directories which exist for a period of time, default is one week.
|
||||||
|
age: 168h
|
||||||
|
# the interval of the purge operations
|
||||||
|
interval: 24h
|
||||||
|
dryrun: false
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
# Cache layer related config
|
||||||
|
{% if cache is defined %}
|
||||||
|
cache:
|
||||||
|
enabled: {{ cache.enabled | lower}}
|
||||||
|
expire_hours: {{ cache.expire_hours }}
|
||||||
|
{% else %}
|
||||||
|
cache:
|
||||||
|
enabled: false
|
||||||
|
expire_hours: 24
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
# Harbor core configurations
|
||||||
|
# Uncomment to enable the following harbor core related configuration items.
|
||||||
|
{% if core is defined %}
|
||||||
|
core:
|
||||||
|
# The provider for updating project quota(usage), there are 2 options, redis or db,
|
||||||
|
# by default is implemented by db but you can switch the updation via redis which
|
||||||
|
# can improve the performance of high concurrent pushing to the same project,
|
||||||
|
# and reduce the database connections spike and occupies.
|
||||||
|
# By redis will bring up some delay for quota usage updation for display, so only
|
||||||
|
# suggest switch provider to redis if you were ran into the db connections spike aroud
|
||||||
|
# the scenario of high concurrent pushing to same project, no improvment for other scenes.
|
||||||
|
quota_update_provider: {{ core.quota_update_provider }}
|
||||||
|
{% else %}
|
||||||
|
# core:
|
||||||
|
# # The provider for updating project quota(usage), there are 2 options, redis or db,
|
||||||
|
# # by default is implemented by db but you can switch the updation via redis which
|
||||||
|
# # can improve the performance of high concurrent pushing to the same project,
|
||||||
|
# # and reduce the database connections spike and occupies.
|
||||||
|
# # By redis will bring up some delay for quota usage updation for display, so only
|
||||||
|
# # suggest switch provider to redis if you were ran into the db connections spike around
|
||||||
|
# # the scenario of high concurrent pushing to same project, no improvement for other scenes.
|
||||||
|
# quota_update_provider: redis # Or db
|
||||||
|
{% endif %}
|
@@ -40,7 +40,7 @@ REGISTRY_CREDENTIAL_USERNAME={{registry_username}}
 REGISTRY_CREDENTIAL_PASSWORD={{registry_password}}
 CSRF_KEY={{csrf_key}}
 ROBOT_SCANNER_NAME_PREFIX={{scan_robot_prefix}}
-PERMITTED_REGISTRY_TYPES_FOR_PROXY_CACHE=docker-hub,harbor,azure-acr,aws-ecr,google-gcr,quay,docker-registry,github-ghcr,jfrog-artifactory
+PERMITTED_REGISTRY_TYPES_FOR_PROXY_CACHE=docker-hub,harbor,azure-acr,ali-acr,aws-ecr,google-gcr,quay,docker-registry,github-ghcr,jfrog-artifactory

 HTTP_PROXY={{core_http_proxy}}
 HTTPS_PROXY={{core_https_proxy}}
@@ -1,4 +1,4 @@
-FROM golang:1.22.3
+FROM golang:1.23.2

 ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution
 ENV BUILDTAGS include_oss include_gcs
@@ -1,7 +1,7 @@
-FROM golang:1.22.3
+FROM golang:1.23.2

-ADD . /go/src/github.com/aquasecurity/harbor-scanner-trivy/
-WORKDIR /go/src/github.com/aquasecurity/harbor-scanner-trivy/
+ADD . /go/src/github.com/goharbor/harbor-scanner-trivy/
+WORKDIR /go/src/github.com/goharbor/harbor-scanner-trivy/

 RUN export GOOS=linux GO111MODULE=on CGO_ENABLED=0 && \
     go build -o scanner-trivy cmd/scanner-trivy/main.go
@@ -16,16 +16,16 @@ cur=$PWD

 # The temporary directory to clone Trivy adapter source code
 TEMP=$(mktemp -d ${TMPDIR-/tmp}/trivy-adapter.XXXXXX)
-git clone https://github.com/aquasecurity/harbor-scanner-trivy.git $TEMP
+git clone https://github.com/goharbor/harbor-scanner-trivy.git $TEMP
 cd $TEMP; git checkout $VERSION; cd -

-echo "Building Trivy adapter binary based on golang:1.22.3..."
+echo "Building Trivy adapter binary based on golang:1.23.2..."
 cp Dockerfile.binary $TEMP
 docker build -f $TEMP/Dockerfile.binary -t trivy-adapter-golang $TEMP

 echo "Copying Trivy adapter binary from the container to the local directory..."
 ID=$(docker create trivy-adapter-golang)
-docker cp $ID:/go/src/github.com/aquasecurity/harbor-scanner-trivy/scanner-trivy binary
+docker cp $ID:/go/src/github.com/goharbor/harbor-scanner-trivy/scanner-trivy binary

 docker rm -f $ID
 docker rmi -f trivy-adapter-golang
@@ -50,12 +50,19 @@ fi
 secret_dir=${data_path}/secret
 config_dir=$harbor_prepare_path/common/config

+# Set the prepare base dir, for mac, it should be $HOME, for linux, it should be /
+# The certificate and the data directory in harbor.yaml should be sub directories of $HOME when installing Harbor in MacOS
+prepare_base_dir=/
+if [ "$(uname)" == "Darwin" ]; then
+  prepare_base_dir=$HOME
+fi
+
 # Run prepare script
 docker run --rm -v $input_dir:/input \
            -v $data_path:/data \
            -v $harbor_prepare_path:/compose_location \
            -v $config_dir:/config \
-           -v /:/hostfs \
+           -v ${prepare_base_dir}:/hostfs${prepare_base_dir} \
            --privileged \
            goharbor/prepare:dev prepare $@

@@ -14,7 +14,9 @@

package rbac

-import "github.com/goharbor/harbor/src/pkg/permission/types"
+import (
+    "github.com/goharbor/harbor/src/pkg/permission/types"
+)

// const action variables
const (

@@ -81,9 +83,86 @@ const (
    ResourceSecurityHub = Resource("security-hub")
)

+type scope string
+
+const (
+    ScopeSystem  = scope("System")
+    ScopeProject = scope("Project")
+)
+
+// RobotPermissionProvider defines the permission provider for robot account
+type RobotPermissionProvider interface {
+    GetPermissions(s scope) []*types.Policy
+}
+
+// GetPermissionProvider gives the robot permission provider
+func GetPermissionProvider() RobotPermissionProvider {
+    // TODO will determine by the ui configuration
+    return &NolimitProvider{}
+}
+
+// BaseProvider ...
+type BaseProvider struct {
+}
+
+// GetPermissions ...
+func (d *BaseProvider) GetPermissions(s scope) []*types.Policy {
+    return PoliciesMap[s]
+}
+
+// NolimitProvider ...
+type NolimitProvider struct {
+    BaseProvider
+}
+
+// GetPermissions ...
+func (n *NolimitProvider) GetPermissions(s scope) []*types.Policy {
+    if s == ScopeSystem {
+        return append(n.BaseProvider.GetPermissions(ScopeSystem),
+            &types.Policy{Resource: ResourceRobot, Action: ActionCreate},
+            &types.Policy{Resource: ResourceRobot, Action: ActionRead},
+            &types.Policy{Resource: ResourceRobot, Action: ActionList},
+            &types.Policy{Resource: ResourceRobot, Action: ActionDelete},
+
+            &types.Policy{Resource: ResourceUser, Action: ActionCreate},
+            &types.Policy{Resource: ResourceUser, Action: ActionRead},
+            &types.Policy{Resource: ResourceUser, Action: ActionUpdate},
+            &types.Policy{Resource: ResourceUser, Action: ActionList},
+            &types.Policy{Resource: ResourceUser, Action: ActionDelete},
+
+            &types.Policy{Resource: ResourceLdapUser, Action: ActionCreate},
+            &types.Policy{Resource: ResourceLdapUser, Action: ActionList},
+
+            &types.Policy{Resource: ResourceExportCVE, Action: ActionCreate},
+            &types.Policy{Resource: ResourceExportCVE, Action: ActionRead},
+
+            &types.Policy{Resource: ResourceQuota, Action: ActionUpdate},
+
+            &types.Policy{Resource: ResourceUserGroup, Action: ActionCreate},
+            &types.Policy{Resource: ResourceUserGroup, Action: ActionRead},
+            &types.Policy{Resource: ResourceUserGroup, Action: ActionUpdate},
+            &types.Policy{Resource: ResourceUserGroup, Action: ActionList},
+            &types.Policy{Resource: ResourceUserGroup, Action: ActionDelete})
+    }
+    if s == ScopeProject {
+        return append(n.BaseProvider.GetPermissions(ScopeProject),
+            &types.Policy{Resource: ResourceRobot, Action: ActionCreate},
+            &types.Policy{Resource: ResourceRobot, Action: ActionRead},
+            &types.Policy{Resource: ResourceRobot, Action: ActionList},
+            &types.Policy{Resource: ResourceRobot, Action: ActionDelete},
+
+            &types.Policy{Resource: ResourceMember, Action: ActionCreate},
+            &types.Policy{Resource: ResourceMember, Action: ActionRead},
+            &types.Policy{Resource: ResourceMember, Action: ActionUpdate},
+            &types.Policy{Resource: ResourceMember, Action: ActionList},
+            &types.Policy{Resource: ResourceMember, Action: ActionDelete})
+    }
+    return []*types.Policy{}
+}
+
var (
-    PoliciesMap = map[string][]*types.Policy{
-        "System": {
+    PoliciesMap = map[scope][]*types.Policy{
+        ScopeSystem: {
            {Resource: ResourceAuditLog, Action: ActionList},

            {Resource: ResourcePreatInstance, Action: ActionRead},

@@ -154,7 +233,7 @@ var (
            {Resource: ResourceQuota, Action: ActionRead},
            {Resource: ResourceQuota, Action: ActionList},
        },
-        "Project": {
+        ScopeProject: {
            {Resource: ResourceLog, Action: ActionList},

            {Resource: ResourceProject, Action: ActionRead},
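For orientation, the provider added above is reached through GetPermissionProvider() and queried per scope. A minimal sketch of an external caller, assuming the package lives at github.com/goharbor/harbor/src/common/rbac (the path of the new test file below); this is illustrative only, not part of the commit:

package main

import (
    "fmt"

    "github.com/goharbor/harbor/src/common/rbac"
)

func main() {
    // GetPermissionProvider currently always returns the NolimitProvider,
    // which appends robot/user/group management policies to the base set.
    provider := rbac.GetPermissionProvider()

    // ScopeSystem and ScopeProject are the only values of the unexported
    // scope type; anything else falls through to an empty policy list.
    for _, p := range provider.GetPermissions(rbac.ScopeSystem) {
        fmt.Println(p.Resource, p.Action)
    }
}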
src/common/rbac/const_test.go (new file, 36 lines)
@@ -0,0 +1,36 @@
package rbac

import (
    _ "github.com/goharbor/harbor/src/pkg/config/inmemory"

    "github.com/stretchr/testify/assert"
    "testing"
)

func TestBaseProvider(t *testing.T) {
    permissionProvider := &BaseProvider{}
    sysPermissions := permissionProvider.GetPermissions(ScopeSystem)

    for _, per := range sysPermissions {
        if per.Action == ActionCreate && per.Resource == ResourceRobot {
            t.Fail()
        }
    }
}

func TestNolimitProvider(t *testing.T) {
    permissionProvider := &BaseProvider{}
    sysPermissions := permissionProvider.GetPermissions(ScopeSystem)

    for _, per := range sysPermissions {
        if per.Action == ActionCreate && per.Resource == ResourceRobot {
            t.Log("no limit provider has the permission of robot account creation")
        }
    }
}

func TestGetPermissionProvider(t *testing.T) {
    defaultPro := GetPermissionProvider()
    _, ok := defaultPro.(*NolimitProvider)
    assert.True(t, ok)
}
@@ -58,7 +58,7 @@ func NewBuilderForUser(user *models.User, ctl project.Controller) RBACUserBuilde
// NewBuilderForPolicies create a builder for the policies
func NewBuilderForPolicies(username string, policies []*types.Policy,
    filters ...func(*proModels.Project, []*types.Policy) []*types.Policy) RBACUserBuilder {
-    return func(ctx context.Context, p *proModels.Project) types.RBACUser {
+    return func(_ context.Context, p *proModels.Project) types.RBACUser {
        for _, filter := range filters {
            policies = filter(p, policies)
        }

@@ -25,7 +25,7 @@ import (

// NewEvaluator create evaluator for the system
func NewEvaluator(username string, policies []*types.Policy) evaluator.Evaluator {
-    return namespace.New(NamespaceKind, func(ctx context.Context, ns types.Namespace) evaluator.Evaluator {
+    return namespace.New(NamespaceKind, func(_ context.Context, _ types.Namespace) evaluator.Evaluator {
        return rbac.New(&rbacUser{
            username: username,
            policies: policies,
@@ -106,7 +106,7 @@ func parseV1alpha1Icon(artifact *artifact.Artifact, manifest *v1.Manifest, reg r
    switch contentType {
    case GIF, PNG, JPEG:
    default:
-        return errors.New(nil).WithCode(errors.BadRequestCode).WithMessage("unsupported content type: %s", contentType)
+        return errors.New(nil).WithCode(errors.BadRequestCode).WithMessagef("unsupported content type: %s", contentType)
    }
    artifact.Icon = iconDigest
    return nil
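The WithMessage to WithMessagef rename that starts here recurs throughout the remaining hunks: the f variant formats its arguments printf-style, while plain WithMessage keeps the string as given. A small sketch of the distinction as used in this commit, assuming the usual github.com/goharbor/harbor/src/lib/errors import path (the import itself is not shown in the diff):

package main

import (
    "fmt"

    "github.com/goharbor/harbor/src/lib/errors"
)

func main() {
    contentType := "application/pdf"

    // Formatted variant, matching the rewritten call sites in this commit.
    e1 := errors.New(nil).WithCode(errors.BadRequestCode).
        WithMessagef("unsupported content type: %s", contentType)

    // Plain variant, left in place where the message needs no formatting
    // (for example "unknown robot kind" further down).
    e2 := errors.New(nil).WithMessage("unknown robot kind").WithCode(errors.BadRequestCode)

    fmt.Println(e1, e2)
}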
@@ -307,7 +307,7 @@ func (c *controller) getByTag(ctx context.Context, repository, tag string, optio
    }
    if len(tags) == 0 {
        return nil, errors.New(nil).WithCode(errors.NotFoundCode).
-            WithMessage("artifact %s:%s not found", repository, tag)
+            WithMessagef("artifact %s:%s not found", repository, tag)
    }
    return c.Get(ctx, tags[0].ArtifactID, option)
}

@@ -44,7 +44,7 @@ func (m *IndexProcessor) AbstractMetadata(_ context.Context, _ *artifact.Artifac
// AbstractAddition abstracts the addition of artifact
func (m *IndexProcessor) AbstractAddition(_ context.Context, _ *artifact.Artifact, addition string) (*processor.Addition, error) {
    return nil, errors.New(nil).WithCode(errors.BadRequestCode).
-        WithMessage("addition %s isn't supported", addition)
+        WithMessagef("addition %s isn't supported", addition)
}

// GetArtifactType returns the artifact type

@@ -66,7 +66,7 @@ func (m *ManifestProcessor) AbstractMetadata(ctx context.Context, artifact *arti
// AbstractAddition abstracts the addition of artifact
func (m *ManifestProcessor) AbstractAddition(_ context.Context, _ *artifact.Artifact, addition string) (*processor.Addition, error) {
    return nil, errors.New(nil).WithCode(errors.BadRequestCode).
-        WithMessage("addition %s isn't supported", addition)
+        WithMessagef("addition %s isn't supported", addition)
}

// GetArtifactType returns the artifact type

@@ -61,7 +61,7 @@ type processor struct {
func (p *processor) AbstractAddition(_ context.Context, artifact *artifact.Artifact, addition string) (*ps.Addition, error) {
    if addition != AdditionTypeValues && addition != AdditionTypeReadme && addition != AdditionTypeDependencies {
        return nil, errors.New(nil).WithCode(errors.BadRequestCode).
-            WithMessage("addition %s isn't supported for %s", addition, ArtifactTypeChart)
+            WithMessagef("addition %s isn't supported for %s", addition, ArtifactTypeChart)
    }

    m, _, err := p.RegCli.PullManifest(artifact.RepositoryName, artifact.Digest)

@@ -132,5 +132,5 @@ func (d *defaultProcessor) AbstractAddition(_ context.Context, artifact *artifac
    // It will be support in the future.
    // return error directly
    return nil, errors.New(nil).WithCode(errors.BadRequestCode).
-        WithMessage("the processor for artifact %s not found, cannot get the addition", artifact.Type)
+        WithMessagef("the processor for artifact %s not found, cannot get the addition", artifact.Type)
}

@@ -52,7 +52,7 @@ func (m *manifestV1Processor) AbstractMetadata(_ context.Context, artifact *arti

func (m *manifestV1Processor) AbstractAddition(_ context.Context, _ *artifact.Artifact, addition string) (*processor.Addition, error) {
    return nil, errors.New(nil).WithCode(errors.BadRequestCode).
-        WithMessage("addition %s isn't supported for %s(manifest version 1)", addition, ArtifactTypeImage)
+        WithMessagef("addition %s isn't supported for %s(manifest version 1)", addition, ArtifactTypeImage)
}

func (m *manifestV1Processor) GetArtifactType(_ context.Context, _ *artifact.Artifact) string {

@@ -87,7 +87,7 @@ func (m *manifestV2Processor) AbstractMetadata(ctx context.Context, artifact *ar
func (m *manifestV2Processor) AbstractAddition(ctx context.Context, artifact *artifact.Artifact, addition string) (*processor.Addition, error) {
    if addition != AdditionTypeBuildHistory {
        return nil, errors.New(nil).WithCode(errors.BadRequestCode).
-            WithMessage("addition %s isn't supported for %s(manifest version 2)", addition, ArtifactTypeImage)
+            WithMessagef("addition %s isn't supported for %s(manifest version 2)", addition, ArtifactTypeImage)
    }

    mani, _, err := m.RegCli.PullManifest(artifact.RepositoryName, artifact.Digest)

@@ -103,7 +103,7 @@ func (m *Processor) AbstractMetadata(ctx context.Context, art *artifact.Artifact
func (m *Processor) AbstractAddition(ctx context.Context, artifact *artifact.Artifact, addition string) (*processor.Addition, error) {
    if addition != AdditionTypeBuildHistory {
        return nil, errors.New(nil).WithCode(errors.BadRequestCode).
-            WithMessage("addition %s isn't supported for %s(manifest version 2)", addition, ArtifactTypeWASM)
+            WithMessagef("addition %s isn't supported for %s(manifest version 2)", addition, ArtifactTypeWASM)
    }

    mani, _, err := m.RegCli.PullManifest(artifact.RepositoryName, artifact.Digest)

@@ -248,7 +248,7 @@ func (c *controller) Get(ctx context.Context, digest string, options ...Option)
    if err != nil {
        return nil, err
    } else if len(blobs) == 0 {
-        return nil, errors.NotFoundError(nil).WithMessage("blob %s not found", digest)
+        return nil, errors.NotFoundError(nil).WithMessagef("blob %s not found", digest)
    }

    return blobs[0], nil

@@ -363,7 +363,7 @@ func (c *controller) Touch(ctx context.Context, blob *blob.Blob) error {
        return err
    }
    if count == 0 {
-        return errors.New(nil).WithMessage(fmt.Sprintf("no blob item is updated to StatusNone, id:%d, digest:%s", blob.ID, blob.Digest)).WithCode(errors.NotFoundCode)
+        return errors.New(nil).WithMessagef("no blob item is updated to StatusNone, id:%d, digest:%s", blob.ID, blob.Digest).WithCode(errors.NotFoundCode)
    }
    return nil
}

@@ -375,7 +375,7 @@ func (c *controller) Fail(ctx context.Context, blob *blob.Blob) error {
        return err
    }
    if count == 0 {
-        return errors.New(nil).WithMessage(fmt.Sprintf("no blob item is updated to StatusDeleteFailed, id:%d, digest:%s", blob.ID, blob.Digest)).WithCode(errors.NotFoundCode)
+        return errors.New(nil).WithMessagef("no blob item is updated to StatusDeleteFailed, id:%d, digest:%s", blob.ID, blob.Digest).WithCode(errors.NotFoundCode)
    }
    return nil
}

@@ -182,11 +182,11 @@ func verifyValueLengthCfg(_ context.Context, cfgs map[string]interface{}) error
        // the cfgs is unmarshal from json string, the number type will be float64
        if vf, ok := v.(float64); ok {
            if vf <= 0 {
-                return errors.BadRequestError(nil).WithMessage("the %s value must be positive", c)
+                return errors.BadRequestError(nil).WithMessagef("the %s value must be positive", c)
            }

            if int64(vf) > maxValue {
-                return errors.BadRequestError(nil).WithMessage(fmt.Sprintf("the %s value is over the limit value: %d", c, maxValue))
+                return errors.BadRequestError(nil).WithMessagef("the %s value is over the limit value: %d", c, maxValue)
            }
        }
    }
@@ -139,7 +139,7 @@ func (c *controller) GetExecution(ctx context.Context, id int64) (*Execution, er
    }
    if len(execs) == 0 {
        return nil, errors.New(nil).WithCode(errors.NotFoundCode).
-            WithMessage("garbage collection execution %d not found", id)
+            WithMessagef("garbage collection execution %d not found", id)
    }
    return convertExecution(execs[0]), nil
}

@@ -157,7 +157,7 @@ func (c *controller) GetTask(ctx context.Context, id int64) (*Task, error) {
    }
    if len(tasks) == 0 {
        return nil, errors.New(nil).WithCode(errors.NotFoundCode).
-            WithMessage("garbage collection task %d not found", id)
+            WithMessagef("garbage collection task %d not found", id)
    }
    return convertTask(tasks[0]), nil
}

@@ -143,7 +143,7 @@ func (c *controller) Get(ctx context.Context, digest string) (*Icon, error) {
    }
    if len(artifacts) == 0 {
        return nil, errors.New(nil).WithCode(errors.NotFoundCode).
-            WithMessage("the icon %s not found", digest)
+            WithMessagef("the icon %s not found", digest)
    }
    _, iconFile, err = c.regCli.PullBlob(artifacts[0].RepositoryName, digest)
    if err != nil {

@@ -135,20 +135,20 @@ func (c *controller) Create(ctx context.Context, projectNameOrID interface{}, re
    if req.MemberUser.UserID > 0 {
        user, err := c.userManager.Get(ctx, req.MemberUser.UserID)
        if err != nil {
-            return 0, errors.BadRequestError(nil).WithMessage("Failed to get user %d: %v", req.MemberUser.UserID, err)
+            return 0, errors.BadRequestError(nil).WithMessagef("Failed to get user %d: %v", req.MemberUser.UserID, err)
        }
        if user == nil {
-            return 0, errors.BadRequestError(nil).WithMessage("User %d not found", req.MemberUser.UserID)
+            return 0, errors.BadRequestError(nil).WithMessagef("User %d not found", req.MemberUser.UserID)
        }
        member.EntityID = req.MemberUser.UserID
        member.EntityType = common.UserMember
    } else if req.MemberGroup.ID > 0 {
        g, err := c.groupManager.Get(ctx, req.MemberGroup.ID)
        if err != nil {
-            return 0, errors.BadRequestError(nil).WithMessage("Failed to get group %d: %v", req.MemberGroup.ID, err)
+            return 0, errors.BadRequestError(nil).WithMessagef("Failed to get group %d: %v", req.MemberGroup.ID, err)
        }
        if g == nil {
-            return 0, errors.BadRequestError(nil).WithMessage("Group %d not found", req.MemberGroup.ID)
+            return 0, errors.BadRequestError(nil).WithMessagef("Group %d not found", req.MemberGroup.ID)
        }
        member.EntityID = req.MemberGroup.ID
    } else if len(req.MemberUser.Username) > 0 {

@@ -219,7 +219,7 @@ func (c *controller) DeleteInstance(ctx context.Context, id int64) error {
    if len(policies) > 0 {
        return errors.New(nil).
            WithCode(errors.PreconditionCode).
-            WithMessage("Provider [%s] cannot be deleted as some preheat policies are using it", ins.Name)
+            WithMessagef("Provider [%s] cannot be deleted as some preheat policies are using it", ins.Name)
    }

    return c.iManager.Delete(ctx, id)

@@ -246,7 +246,7 @@ func (c *controller) UpdateInstance(ctx context.Context, instance *providerModel
        if len(policies) > 0 {
            return errors.New(nil).
                WithCode(errors.PreconditionCode).
-                WithMessage("Provider [%s] cannot be disabled as some preheat policies are using it", oldIns.Name)
+                WithMessagef("Provider [%s] cannot be disabled as some preheat policies are using it", oldIns.Name)
        }
    }
@@ -123,6 +123,7 @@ func flushQuota(ctx context.Context) {
    iter, err := cache.Default().Scan(ctx, "quota:*")
    if err != nil {
        log.Errorf("failed to scan out the quota records from redis")
+        return
    }

    for iter.Next(ctx) {

@@ -349,7 +350,7 @@ func (c *controller) updateUsageWithRetry(ctx context.Context, reference, refere
    options := []retry.Option{
        retry.Timeout(defaultRetryTimeout),
        retry.Backoff(false),
-        retry.Callback(func(err error, sleep time.Duration) {
+        retry.Callback(func(err error, _ time.Duration) {
            log.G(ctx).Debugf("failed to update the quota usage for %s %s, error: %v", reference, referenceID, err)
        }),
    }

@@ -488,7 +489,7 @@ func reserveResources(resources types.ResourceList) func(hardLimits, used types.
        newUsed := types.Add(used, resources)

        if err := quota.IsSafe(hardLimits, used, newUsed, false); err != nil {
-            return nil, errors.DeniedError(err).WithMessage("Quota exceeded when processing the request of %v", err)
+            return nil, errors.DeniedError(err).WithMessagef("Quota exceeded when processing the request of %v", err)
        }

        return newUsed, nil

@@ -496,7 +497,7 @@ func reserveResources(resources types.ResourceList) func(hardLimits, used types.
}

func rollbackResources(resources types.ResourceList) func(hardLimits, used types.ResourceList) (types.ResourceList, error) {
-    return func(hardLimits, used types.ResourceList) (types.ResourceList, error) {
+    return func(_, used types.ResourceList) (types.ResourceList, error) {
        newUsed := types.Subtract(used, resources)
        // ensure that new used is never negative
        if negativeUsed := types.IsNegative(newUsed); len(negativeUsed) > 0 {
@@ -75,7 +75,7 @@ func getProjectsBatchFn(ctx context.Context, keys dataloader.Keys) []*dataloader
    for _, projectID := range projectIDs {
        project, ok := projectsMap[projectID]
        if !ok {
-            err := errors.NotFoundError(nil).WithMessage("project %d not found", projectID)
+            err := errors.NotFoundError(nil).WithMessagef("project %d not found", projectID)
            return handleError(err)
        }

@@ -136,7 +136,7 @@ func (c *controller) Delete(ctx context.Context, id int64) error {
        return err
    }
    if count > 0 {
-        return errors.New(nil).WithCode(errors.PreconditionCode).WithMessage("the registry %d is referenced by replication policies, cannot delete it", id)
+        return errors.New(nil).WithCode(errors.PreconditionCode).WithMessagef("the registry %d is referenced by replication policies, cannot delete it", id)
    }
    // referenced by replication policy as destination registry
    count, err = c.repMgr.Count(ctx, &q.Query{

@@ -148,7 +148,7 @@ func (c *controller) Delete(ctx context.Context, id int64) error {
        return err
    }
    if count > 0 {
-        return errors.New(nil).WithCode(errors.PreconditionCode).WithMessage("the registry %d is referenced by replication policies, cannot delete it", id)
+        return errors.New(nil).WithCode(errors.PreconditionCode).WithMessagef("the registry %d is referenced by replication policies, cannot delete it", id)
    }
    // referenced by proxy cache project
    count, err = c.proMgr.Count(ctx, &q.Query{

@@ -160,7 +160,7 @@ func (c *controller) Delete(ctx context.Context, id int64) error {
        return err
    }
    if count > 0 {
-        return errors.New(nil).WithCode(errors.PreconditionCode).WithMessage("the registry %d is referenced by proxy cache project, cannot delete it", id)
+        return errors.New(nil).WithCode(errors.PreconditionCode).WithMessagef("the registry %d is referenced by proxy cache project, cannot delete it", id)
    }

    return c.regMgr.Delete(ctx, id)

@@ -102,7 +102,7 @@ func (c *controller) Start(ctx context.Context, policy *replicationmodel.Policy,
    logger := log.GetLogger(ctx)
    if !policy.Enabled {
        return 0, errors.New(nil).WithCode(errors.PreconditionCode).
-            WithMessage("the policy %d is disabled", policy.ID)
+            WithMessagef("the policy %d is disabled", policy.ID)
    }
    // create an execution record
    extra := make(map[string]interface{})

@@ -213,7 +213,7 @@ func (c *controller) GetExecution(ctx context.Context, id int64) (*Execution, er
    }
    if len(execs) == 0 {
        return nil, errors.New(nil).WithCode(errors.NotFoundCode).
-            WithMessage("replication execution %d not found", id)
+            WithMessagef("replication execution %d not found", id)
    }
    return convertExecution(execs[0]), nil
}

@@ -250,7 +250,7 @@ func (c *controller) GetTask(ctx context.Context, id int64) (*Task, error) {
    }
    if len(tasks) == 0 {
        return nil, errors.New(nil).WithCode(errors.NotFoundCode).
-            WithMessage("replication task %d not found", id)
+            WithMessagef("replication task %d not found", id)
    }
    return convertTask(tasks[0]), nil
}
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.43.2. DO NOT EDIT.
+// Code generated by mockery v2.46.2. DO NOT EDIT.

package flow

@@ -1,4 +1,4 @@
-// Code generated by mockery v2.43.2. DO NOT EDIT.
+// Code generated by mockery v2.46.2. DO NOT EDIT.

package flow

@@ -203,7 +203,7 @@ func replaceNamespace(repository string, namespace string, replaceCount int8, ds
        dstRepoPrefix = namespace
    case int(replaceCount) > srcLength-1: // invalid replace count
        return "", errors.New(nil).WithCode(errors.BadRequestCode).
-            WithMessage("the source repository %q contains only %d path components %v excepting the last one, but the destination namespace flattening level is %d",
+            WithMessagef("the source repository %q contains only %d path components %v excepting the last one, but the destination namespace flattening level is %d",
                repository, srcLength-1, srcRepoPathComponents[:srcLength-1], replaceCount)
    default:
        dstRepoPrefix = namespace + "/" + strings.Join(srcRepoPathComponents[replaceCount:srcLength-1], "/")

@@ -216,12 +216,12 @@ func replaceNamespace(repository string, namespace string, replaceCount int8, ds
    switch dstRepoComponentPathType {
    case model.RepositoryPathComponentTypeOnlyTwo:
        if dstLength != 2 {
-            return "", errors.New(nil).WithCode(errors.BadRequestCode).WithMessage("the destination repository %q contains %d path components %v, but the destination registry only supports 2",
+            return "", errors.New(nil).WithCode(errors.BadRequestCode).WithMessagef("the destination repository %q contains %d path components %v, but the destination registry only supports 2",
                dstRepo, dstLength, dstRepoPathComponents)
        }
    case model.RepositoryPathComponentTypeAtLeastTwo:
        if dstLength < 2 {
-            return "", errors.New(nil).WithCode(errors.BadRequestCode).WithMessage("the destination repository %q contains only %d path components %v, but the destination registry requires at least 2",
+            return "", errors.New(nil).WithCode(errors.BadRequestCode).WithMessagef("the destination repository %q contains only %d path components %v, but the destination registry requires at least 2",
                dstRepo, dstLength, dstRepoPathComponents)
        }
    }

@@ -1,4 +1,4 @@
-// Code generated by mockery v2.43.2. DO NOT EDIT.
+// Code generated by mockery v2.46.2. DO NOT EDIT.

package replication

@@ -91,7 +91,7 @@ func (p *Policy) Validate() error {
    if len(p.DestNamespace) > 0 {
        if !lib.RepositoryNameRe.MatchString(p.DestNamespace) {
            return errors.New(nil).WithCode(errors.BadRequestCode).
-                WithMessage("invalid destination namespace: %s", p.DestNamespace)
+                WithMessagef("invalid destination namespace: %s", p.DestNamespace)
        }
    }

@@ -102,11 +102,11 @@ func (p *Policy) Validate() error {
    case model.TriggerTypeScheduled:
        if p.Trigger.Settings == nil || len(p.Trigger.Settings.Cron) == 0 {
            return errors.New(nil).WithCode(errors.BadRequestCode).
-                WithMessage("the cron string cannot be empty when the trigger type is %s", model.TriggerTypeScheduled)
+                WithMessagef("the cron string cannot be empty when the trigger type is %s", model.TriggerTypeScheduled)
        }
        if _, err := utils.CronParser().Parse(p.Trigger.Settings.Cron); err != nil {
            return errors.New(nil).WithCode(errors.BadRequestCode).
-                WithMessage("invalid cron string for scheduled trigger: %s", p.Trigger.Settings.Cron)
+                WithMessagef("invalid cron string for scheduled trigger: %s", p.Trigger.Settings.Cron)
        }
        cronParts := strings.Split(p.Trigger.Settings.Cron, " ")
        if cronParts[0] != "0" {
@@ -36,8 +36,6 @@ import (
    "github.com/goharbor/harbor/src/pkg/task"
)

-// go:generate mockery -name Controller -case snake

// Controller to handle the requests related with retention
type Controller interface {
    GetRetention(ctx context.Context, id int64) (*policy.Metadata, error)
@@ -97,10 +97,6 @@ func (d *controller) Count(ctx context.Context, query *q.Query) (int64, error) {

// Create ...
func (d *controller) Create(ctx context.Context, r *Robot) (int64, string, error) {
-    if err := d.setProject(ctx, r); err != nil {
-        return 0, "", err
-    }
-
    var expiresAt int64
    if r.Duration == -1 {
        expiresAt = -1

@@ -327,22 +323,6 @@ func (d *controller) populatePermissions(ctx context.Context, r *Robot) error {
    return nil
}

-// set the project info if it's a project level robot
-func (d *controller) setProject(ctx context.Context, r *Robot) error {
-    if r == nil {
-        return nil
-    }
-    if r.Level == LEVELPROJECT {
-        pro, err := d.proMgr.Get(ctx, r.Permissions[0].Namespace)
-        if err != nil {
-            return err
-        }
-        r.ProjectName = pro.Name
-        r.ProjectID = pro.ProjectID
-    }
-    return nil
-}
-
// convertScope converts the db scope into robot model
// /system => Kind: system Namespace: /
// /project/* => Kind: project Namespace: *

@@ -394,6 +374,22 @@ func (d *controller) toScope(ctx context.Context, p *Permission) (string, error)
    return "", errors.New(nil).WithMessage("unknown robot kind").WithCode(errors.BadRequestCode)
}

+// set the project info if it's a project level robot
+func SetProject(ctx context.Context, r *Robot) error {
+    if r == nil {
+        return nil
+    }
+    if r.Level == LEVELPROJECT {
+        pro, err := project.New().Get(ctx, r.Permissions[0].Namespace)
+        if err != nil {
+            return err
+        }
+        r.ProjectName = pro.Name
+        r.ProjectID = pro.ProjectID
+    }
+    return nil
+}
+
func CreateSec(salt ...string) (string, string, string, error) {
    var secret, pwd string
    options := []retry.Option{
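The project lookup moves out of the controller's Create path into the exported SetProject helper above, so callers fill in the project fields before creating a project-level robot. A minimal hedged sketch of such a caller, assuming the controller/robot import path; whether the API layer invokes it exactly like this is not shown in the diff:

package example

import (
    "context"

    "github.com/goharbor/harbor/src/controller/robot"
)

// Sketch only: a hypothetical caller. The field and function names come from
// the hunks in this commit; everything else is illustrative.
func createProjectRobot(ctx context.Context, ctl robot.Controller) (int64, string, error) {
    r := &robot.Robot{
        Level: robot.LEVELPROJECT,
        Permissions: []*robot.Permission{
            {Kind: "project", Namespace: "library"},
        },
    }
    // SetProject resolves ProjectName/ProjectID from Permissions[0].Namespace;
    // after this change the controller no longer does that inside Create.
    if err := robot.SetProject(ctx, r); err != nil {
        return 0, "", err
    }
    return ctl.Create(ctx, r)
}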
@@ -39,10 +39,11 @@ const (
// Robot ...
type Robot struct {
    model.Robot
    ProjectName string
+    ProjectNameOrID interface{}
    Level       string
    Editable    bool          `json:"editable"`
    Permissions []*Permission `json:"permissions"`
}

// IsSysLevel, true is a system level robot, others are project level.
@@ -243,12 +243,12 @@ func (bc *basicController) Scan(ctx context.Context, artifact *ar.Artifact, opti

    // In case it does not exist
    if r == nil {
-        return errors.PreconditionFailedError(nil).WithMessage("no available scanner for project: %d", artifact.ProjectID)
+        return errors.PreconditionFailedError(nil).WithMessagef("no available scanner for project: %d", artifact.ProjectID)
    }

    // Check if it is disabled
    if r.Disabled {
-        return errors.PreconditionFailedError(nil).WithMessage("scanner %s is deactivated", r.Name)
+        return errors.PreconditionFailedError(nil).WithMessagef("scanner %s is deactivated", r.Name)
    }

    artifacts, scannable, err := bc.collectScanningArtifacts(ctx, r, artifact)

@@ -266,7 +266,7 @@ func (bc *basicController) Scan(ctx context.Context, artifact *ar.Artifact, opti
            // skip to return err for event related scan
            return nil
        }
-        return errors.BadRequestError(nil).WithMessage("the configured scanner %s does not support scanning artifact with mime type %s", r.Name, artifact.ManifestMediaType)
+        return errors.BadRequestError(nil).WithMessagef("the configured scanner %s does not support scanning artifact with mime type %s", r.Name, artifact.ManifestMediaType)
    }

    var (

@@ -376,8 +376,7 @@ func (bc *basicController) Stop(ctx context.Context, artifact *ar.Artifact, capT
    }

    if len(executions) == 0 {
-        message := fmt.Sprintf("no scan job for artifact digest=%v", artifact.Digest)
-        return errors.BadRequestError(nil).WithMessage(message)
+        return errors.BadRequestError(nil).WithMessagef("no scan job for artifact digest=%v", artifact.Digest)
    }
    execution := executions[0]
    return bc.execMgr.Stop(ctx, execution.ID)

@@ -590,7 +589,7 @@ func (bc *basicController) GetReport(ctx context.Context, artifact *ar.Artifact,
    }

    if r == nil {
-        return nil, errors.NotFoundError(nil).WithMessage("no scanner registration configured for project: %d", artifact.ProjectID)
+        return nil, errors.NotFoundError(nil).WithMessagef("no scanner registration configured for project: %d", artifact.ProjectID)
    }

    artifacts, scannable, err := bc.collectScanningArtifacts(ctx, r, artifact)

@@ -599,7 +598,7 @@ func (bc *basicController) GetReport(ctx context.Context, artifact *ar.Artifact,
    }

    if !scannable {
-        return nil, errors.NotFoundError(nil).WithMessage("report not found for %s@%s", artifact.RepositoryName, artifact.Digest)
+        return nil, errors.NotFoundError(nil).WithMessagef("report not found for %s@%s", artifact.RepositoryName, artifact.Digest)
    }

    groupReports := make([][]*scan.Report, len(artifacts))

@@ -681,7 +680,7 @@ func (bc *basicController) GetScanLog(ctx context.Context, artifact *ar.Artifact
    reportUUIDToTasks := map[string]*task.Task{}
    for _, t := range tasks {
        if !scanTaskForArtifacts(t, artifactMap) {
-            return nil, errors.NotFoundError(nil).WithMessage("scan log with uuid: %s not found", uuid)
+            return nil, errors.NotFoundError(nil).WithMessagef("scan log with uuid: %s not found", uuid)
        }
        for _, reportUUID := range GetReportUUIDs(t.ExtraAttrs) {
            reportUUIDToTasks[reportUUID] = t

@@ -867,7 +866,8 @@ func (bc *basicController) makeRobotAccount(ctx context.Context, projectID int64
            CreatorType: "local",
            CreatorRef:  int64(0),
        },
+        ProjectName: projectName,
        Level:       robot.LEVELPROJECT,
        Permissions: []*robot.Permission{
            {
                Kind: "project",

@@ -1042,7 +1042,7 @@ func (bc *basicController) getScanTask(ctx context.Context, reportUUID string) (
    }

    if len(tasks) == 0 {
-        return nil, errors.NotFoundError(nil).WithMessage("task for report %s not found", reportUUID)
+        return nil, errors.NotFoundError(nil).WithMessagef("task for report %s not found", reportUUID)
    }

    return tasks[0], nil

@@ -238,7 +238,8 @@ func (suite *ControllerTestSuite) SetupSuite() {
            CreatorType: "local",
            CreatorRef:  int64(0),
        },
+        ProjectName: "library",
        Level:       robot.LEVELPROJECT,
        Permissions: []*robot.Permission{
            {
                Kind: "project",
@@ -98,7 +98,7 @@ func (bc *basicController) GetTotalOfRegistrations(ctx context.Context, query *q
// CreateRegistration ...
func (bc *basicController) CreateRegistration(ctx context.Context, registration *scanner.Registration) (string, error) {
    if isReservedName(registration.Name) {
-        return "", errors.BadRequestError(nil).WithMessage(`name "%s" is reserved, please try a different name`, registration.Name)
+        return "", errors.BadRequestError(nil).WithMessagef(`name "%s" is reserved, please try a different name`, registration.Name)
    }

    // Check if the registration is available

@@ -168,7 +168,7 @@ func (bc *basicController) UpdateRegistration(ctx context.Context, registration
    }

    if isReservedName(registration.Name) {
-        return errors.BadRequestError(nil).WithMessage(`name "%s" is reserved, please try a different name`, registration.Name)
+        return errors.BadRequestError(nil).WithMessagef(`name "%s" is reserved, please try a different name`, registration.Name)
    }

    return bc.manager.Update(ctx, registration)

@@ -343,7 +343,7 @@ func (bc *basicController) GetMetadata(ctx context.Context, registrationUUID str
    }

    if r == nil {
-        return nil, errors.NotFoundError(nil).WithMessage("registration %s not found", registrationUUID)
+        return nil, errors.NotFoundError(nil).WithMessagef("registration %s not found", registrationUUID)
    }

    return bc.Ping(ctx, r)
|
|||||||
func (m *MetadataResult) Unpack() (*v1.ScannerAdapterMetadata, error) {
|
func (m *MetadataResult) Unpack() (*v1.ScannerAdapterMetadata, error) {
|
||||||
var err error
|
var err error
|
||||||
if m.Error != "" {
|
if m.Error != "" {
|
||||||
err = fmt.Errorf(m.Error)
|
err = errors.New(nil).WithMessage(m.Error)
|
||||||
}
|
}
|
||||||
|
|
||||||
return m.Metadata, err
|
return m.Metadata, err
|
||||||
|
@ -147,11 +147,11 @@ func (c *controller) attachTags(ctx context.Context, vuls []*secHubModel.Vulnera
|
|||||||
}
|
}
|
||||||
|
|
||||||
// get tags in the artifact list
|
// get tags in the artifact list
|
||||||
var artifactIds []interface{}
|
var artifactIDs []interface{}
|
||||||
for k := range artifactTagMap {
|
for k := range artifactTagMap {
|
||||||
artifactIds = append(artifactIds, k)
|
artifactIDs = append(artifactIDs, k)
|
||||||
}
|
}
|
||||||
query := q.New(q.KeyWords{"artifact_id": q.NewOrList(artifactIds)})
|
query := q.New(q.KeyWords{"artifact_id": q.NewOrList(artifactIDs)})
|
||||||
tags, err := c.tagMgr.List(ctx, query)
|
tags, err := c.tagMgr.List(ctx, query)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return vuls, err
|
return vuls, err
|
||||||
|
@ -97,7 +97,7 @@ func (c *controller) Ensure(ctx context.Context, repositoryID, artifactID int64,
|
|||||||
// existing tag must check the immutable status and signature
|
// existing tag must check the immutable status and signature
|
||||||
if tag.Immutable {
|
if tag.Immutable {
|
||||||
return 0, errors.New(nil).WithCode(errors.PreconditionCode).
|
return 0, errors.New(nil).WithCode(errors.PreconditionCode).
|
||||||
WithMessage("the tag %s configured as immutable, cannot be updated", tag.Name)
|
WithMessagef("the tag %s configured as immutable, cannot be updated", tag.Name)
|
||||||
}
|
}
|
||||||
// the tag exists under the repository, but it is attached to other artifact
|
// the tag exists under the repository, but it is attached to other artifact
|
||||||
// update it to point to the provided artifact
|
// update it to point to the provided artifact
|
||||||
@ -189,7 +189,7 @@ func (c *controller) Delete(ctx context.Context, id int64) (err error) {
|
|||||||
}
|
}
|
||||||
if tag.Immutable {
|
if tag.Immutable {
|
||||||
return errors.New(nil).WithCode(errors.PreconditionCode).
|
return errors.New(nil).WithCode(errors.PreconditionCode).
|
||||||
WithMessage("the tag %s configured as immutable, cannot be deleted", tag.Name)
|
WithMessagef("the tag %s configured as immutable, cannot be deleted", tag.Name)
|
||||||
}
|
}
|
||||||
return c.tagMgr.Delete(ctx, id)
|
return c.tagMgr.Delete(ctx, id)
|
||||||
}
|
}
|
||||||
|
@ -178,17 +178,17 @@ func (c *controller) Count(ctx context.Context, query *q.Query) (int64, error) {
|
|||||||
func (c *controller) Delete(ctx context.Context, id int) error {
|
func (c *controller) Delete(ctx context.Context, id int) error {
|
||||||
// cleanup project member with the user
|
// cleanup project member with the user
|
||||||
if err := c.memberMgr.DeleteMemberByUserID(ctx, id); err != nil {
|
if err := c.memberMgr.DeleteMemberByUserID(ctx, id); err != nil {
|
||||||
return errors.UnknownError(err).WithMessage("delete user failed, user id: %v, cannot delete project user member, error:%v", id, err)
|
return errors.UnknownError(err).WithMessagef("delete user failed, user id: %v, cannot delete project user member, error:%v", id, err)
|
||||||
}
|
}
|
||||||
// delete oidc metadata under the user
|
// delete oidc metadata under the user
|
||||||
if lib.GetAuthMode(ctx) == common.OIDCAuth {
|
if lib.GetAuthMode(ctx) == common.OIDCAuth {
|
||||||
if err := c.oidcMetaMgr.DeleteByUserID(ctx, id); err != nil {
|
if err := c.oidcMetaMgr.DeleteByUserID(ctx, id); err != nil {
|
||||||
return errors.UnknownError(err).WithMessage("delete user failed, user id: %v, cannot delete oidc user, error:%v", id, err)
|
return errors.UnknownError(err).WithMessagef("delete user failed, user id: %v, cannot delete oidc user, error:%v", id, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
gdprSetting, err := config.GDPRSetting(ctx)
|
gdprSetting, err := config.GDPRSetting(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.UnknownError(err).WithMessage("failed to load GDPR setting: %v", err)
|
return errors.UnknownError(err).WithMessagef("failed to load GDPR setting: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if gdprSetting.AuditLogs {
|
if gdprSetting.AuditLogs {
|
||||||
|
@ -81,7 +81,7 @@ func (c *controller) Update(ctx context.Context, id int, groupName string) error
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if len(ug) == 0 {
|
if len(ug) == 0 {
|
||||||
return errors.NotFoundError(nil).WithMessage("the user group with id %v is not found", id)
|
return errors.NotFoundError(nil).WithMessagef("the user group with id %v is not found", id)
|
||||||
}
|
}
|
||||||
return c.mgr.UpdateName(ctx, id, groupName)
|
return c.mgr.UpdateName(ctx, id, groupName)
|
||||||
}
|
}
|
||||||
@ -90,10 +90,10 @@ func (c *controller) Create(ctx context.Context, group model.UserGroup) (int, er
|
|||||||
if group.GroupType == common.LDAPGroupType {
|
if group.GroupType == common.LDAPGroupType {
|
||||||
ldapGroup, err := auth.SearchGroup(ctx, group.LdapGroupDN)
|
ldapGroup, err := auth.SearchGroup(ctx, group.LdapGroupDN)
|
||||||
if err == ldap.ErrNotFound || ldapGroup == nil {
|
if err == ldap.ErrNotFound || ldapGroup == nil {
|
||||||
return 0, errors.BadRequestError(nil).WithMessage("LDAP Group DN is not found: DN:%v", group.LdapGroupDN)
|
return 0, errors.BadRequestError(nil).WithMessagef("LDAP Group DN is not found: DN:%v", group.LdapGroupDN)
|
||||||
}
|
}
|
||||||
if err == ldap.ErrDNSyntax {
|
if err == ldap.ErrDNSyntax {
|
||||||
return 0, errors.BadRequestError(nil).WithMessage("invalid DN syntax. DN: %v", group.LdapGroupDN)
|
return 0, errors.BadRequestError(nil).WithMessagef("invalid DN syntax. DN: %v", group.LdapGroupDN)
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
@ -102,7 +102,7 @@ func (c *controller) Create(ctx context.Context, group model.UserGroup) (int, er
|
|||||||
id, err := c.mgr.Create(ctx, group)
|
id, err := c.mgr.Create(ctx, group)
|
||||||
if err != nil && err == usergroup.ErrDupUserGroup {
return 0, errors.ConflictError(nil).
-WithMessage("duplicate user group, group name:%v, group type: %v, ldap group DN: %v",
+WithMessagef("duplicate user group, group name:%v, group type: %v, ldap group DN: %v",
group.GroupName, group.GroupType, group.LdapGroupDN)
}

@@ -146,7 +146,7 @@ func (c *controller) GetTask(ctx context.Context, taskID int64) (*task.Task, err

if len(tasks) == 0 {
return nil, errors.New(nil).WithCode(errors.NotFoundCode).
-WithMessage("webhook task %d not found", taskID)
+WithMessagef("webhook task %d not found", taskID)
}
return tasks[0], nil
}
@@ -102,7 +102,7 @@ func (d *DefaultAuthenticateHelper) OnBoardUser(_ context.Context, _ *models.Use
// SearchUser - Get user information from account repository
func (d *DefaultAuthenticateHelper) SearchUser(_ context.Context, username string) (*models.User, error) {
log.Errorf("Not support searching user, username: %s", username)
-return nil, libErrors.NotFoundError(ErrNotSupported).WithMessage("%s not found", username)
+return nil, libErrors.NotFoundError(ErrNotSupported).WithMessagef("%s not found", username)
}

// PostAuthenticate - Update user information after authenticate, such as OnBoard or sync info etc
@@ -118,7 +118,7 @@ func (d *DefaultAuthenticateHelper) OnBoardGroup(_ context.Context, _ *model.Use
// SearchGroup - Search ldap group by group key, groupKey is the unique attribute of group in authenticator, for LDAP, the key is group DN
func (d *DefaultAuthenticateHelper) SearchGroup(_ context.Context, groupKey string) (*model.UserGroup, error) {
log.Errorf("Not support searching group, group key: %s", groupKey)
-return nil, libErrors.NotFoundError(ErrNotSupported).WithMessage("%s not found", groupKey)
+return nil, libErrors.NotFoundError(ErrNotSupported).WithMessagef("%s not found", groupKey)
}

var registry = make(map[string]AuthenticateHelper)
@@ -222,7 +222,7 @@ func SearchAndOnBoardUser(ctx context.Context, username string) (int, error) {
return 0, err
}
if user == nil {
-return 0, libErrors.NotFoundError(nil).WithMessage(fmt.Sprintf("user %s is not found", username))
+return 0, libErrors.NotFoundError(nil).WithMessagef("user %s is not found", username)
}
err = OnBoardUser(ctx, user)
if err != nil {
@@ -204,7 +204,7 @@ func verifyGroupInLDAP(groupDN string, sess *ldap.Session) (*model.Group, bool)
return nil, false
}
if len(lGroups) == 0 {
-log.Warningf("Can not get the ldap group name with DN %v", groupDN)
+log.Debugf("Can not get the ldap group name with DN %v", groupDN)
return nil, false
}
return &lGroups[0], true
@@ -262,7 +262,7 @@ func (l *Auth) SearchUser(ctx context.Context, username string) (*models.User, e

log.Debugf("Found ldap user %v", user)
} else {
-return nil, errors.NotFoundError(nil).WithMessage("no user found: %v", username)
+return nil, errors.NotFoundError(nil).WithMessagef("no user found: %v", username)
}

return &user, nil
@@ -292,7 +292,7 @@ func (l *Auth) SearchGroup(ctx context.Context, groupKey string) (*ugModel.UserG
}

if len(userGroupList) == 0 {
-return nil, errors.NotFoundError(nil).WithMessage("failed to searh ldap group with groupDN:%v", groupKey)
+return nil, errors.NotFoundError(nil).WithMessagef("failed to searh ldap group with groupDN:%v", groupKey)
}
userGroup := ugModel.UserGroup{
GroupName: userGroupList[0].Name,
@@ -39,7 +39,7 @@ func (h *Handler) Get() {
tokenCreator, ok := creatorMap[service]
if !ok {
errMsg := fmt.Sprintf("Unable to handle service: %s", service)
-log.Errorf(errMsg)
+log.Error(errMsg)
h.CustomAbort(http.StatusBadRequest, template.HTMLEscapeString(errMsg))
}
token, err := tokenCreator.Create(request)
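A note on the log.Errorf -> log.Error change in the token handler hunk above: errMsg is already a fully built string, so passing it through a printf-style logger means any '%' that slips into the client-supplied service name is re-interpreted as a formatting verb, and go vet's printf check flags the non-constant format string. A minimal sketch of the failure mode, using only the standard library log package rather than Harbor's logger (the service value is invented):

package main

import "log"

func main() {
	// A client-supplied service name may contain printf verbs.
	service := "notary%s"
	errMsg := "Unable to handle service: " + service

	// Printf treats errMsg as a format string; "%s" has no argument,
	// so the output becomes "... notary%!s(MISSING)".
	log.Printf(errMsg)

	// Print emits the message verbatim, which is the behaviour the hunk
	// above switches to with log.Error(errMsg).
	log.Print(errMsg)
}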
54 src/go.mod
@@ -1,11 +1,11 @@
module github.com/goharbor/harbor/src

-go 1.22.3
+go 1.23.2

require (
github.com/FZambia/sentinel v1.1.0
github.com/Masterminds/semver v1.5.0
-github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190726115642-cd293c93fd97
+github.com/aliyun/alibaba-cloud-sdk-go v1.61.1193
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2
github.com/aws/aws-sdk-go v1.55.5
github.com/beego/beego/v2 v2.2.1
@@ -48,36 +48,38 @@ require (
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.1.0
github.com/pkg/errors v0.9.1
-github.com/prometheus/client_golang v1.19.1
+github.com/prometheus/client_golang v1.20.4
github.com/robfig/cron/v3 v3.0.1
github.com/spf13/viper v1.19.0
github.com/stretchr/testify v1.9.0
github.com/tencentcloud/tencentcloud-sdk-go v3.0.233+incompatible
github.com/vmihailenco/msgpack/v5 v5.4.1
-github.com/volcengine/volcengine-go-sdk v1.0.138
+github.com/volcengine/volcengine-go-sdk v1.0.159
go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux v0.51.0
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0
-go.opentelemetry.io/otel v1.29.0
+go.opentelemetry.io/otel v1.30.0
go.opentelemetry.io/otel/exporters/jaeger v1.0.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0
go.opentelemetry.io/otel/sdk v1.27.0
-go.opentelemetry.io/otel/trace v1.29.0
+go.opentelemetry.io/otel/trace v1.30.0
go.uber.org/ratelimit v0.3.1
-golang.org/x/crypto v0.25.0
+golang.org/x/crypto v0.27.0
golang.org/x/net v0.27.0
golang.org/x/oauth2 v0.21.0
-golang.org/x/sync v0.7.0
+golang.org/x/sync v0.8.0
-golang.org/x/text v0.16.0
+golang.org/x/text v0.18.0
-golang.org/x/time v0.5.0
+golang.org/x/time v0.7.0
gopkg.in/h2non/gock.v1 v1.1.2
gopkg.in/yaml.v2 v2.4.0
helm.sh/helm/v3 v3.15.4
-k8s.io/api v0.30.3
+k8s.io/api v0.31.1
-k8s.io/apimachinery v0.30.3
+k8s.io/apimachinery v0.31.1
-k8s.io/client-go v0.30.3
+k8s.io/client-go v0.31.1
sigs.k8s.io/yaml v1.4.0
)

+require github.com/prometheus/client_model v0.6.1
+
require (
cloud.google.com/go/compute/metadata v0.3.0 // indirect
github.com/Azure/azure-sdk-for-go v37.2.0+incompatible // indirect
@@ -94,7 +96,7 @@ require (
github.com/Unknwon/goconfig v0.0.0-20160216183935-5f601ca6ef4d // indirect
github.com/benbjohnson/clock v1.3.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
-github.com/cespare/xxhash/v2 v2.2.0 // indirect
+github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/denverdino/aliyungo v0.0.0-20191227032621-df38c6fa730c // indirect
@@ -105,6 +107,7 @@ require (
github.com/docker/go-metrics v0.0.1 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
+github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-jose/go-jose/v4 v4.0.2 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
@@ -132,7 +135,7 @@ require (
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
-github.com/klauspost/compress v1.17.2 // indirect
+github.com/klauspost/compress v1.17.9 // indirect
github.com/lib/pq v1.10.9 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
@@ -141,13 +144,13 @@ require (
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/oklog/ulid v1.3.1 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
-github.com/prometheus/client_model v0.5.0 // indirect
-github.com/prometheus/common v0.48.0 // indirect
-github.com/prometheus/procfs v0.12.0 // indirect
+github.com/prometheus/common v0.55.0 // indirect
+github.com/prometheus/procfs v0.15.1 // indirect
github.com/robfig/cron v1.0.0 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
@@ -164,27 +167,28 @@ require (
github.com/vbatts/tar-split v0.11.3 // indirect
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
github.com/volcengine/volc-sdk-golang v1.0.23 // indirect
+github.com/x448/float16 v0.8.4 // indirect
go.mongodb.org/mongo-driver v1.14.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 // indirect
-go.opentelemetry.io/otel/metric v1.29.0 // indirect
+go.opentelemetry.io/otel/metric v1.30.0 // indirect
go.opentelemetry.io/proto/otlp v1.2.0 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
go.uber.org/zap v1.21.0 // indirect
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
-golang.org/x/sys v0.22.0 // indirect
+golang.org/x/sys v0.25.0 // indirect
-golang.org/x/term v0.22.0 // indirect
+golang.org/x/term v0.24.0 // indirect
google.golang.org/api v0.171.0 // indirect
google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291 // indirect
google.golang.org/grpc v1.64.1 // indirect
-google.golang.org/protobuf v1.34.1 // indirect
+google.golang.org/protobuf v1.34.2 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
-k8s.io/klog/v2 v2.120.1 // indirect
+k8s.io/klog/v2 v2.130.1 // indirect
-k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
+k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
)
106 src/go.sum
@@ -55,6 +55,8 @@ github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 h1:Kk6a4nehpJ3Uu
github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190726115642-cd293c93fd97 h1:bNE5ID4C3YOkROfvBjXJUG53gyb+8az3TQN02LqnGBk=
github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190726115642-cd293c93fd97/go.mod h1:myCDvQSzCW+wB1WAlocEru4wMGJxy+vlxHdhegi1CDQ=
+github.com/aliyun/alibaba-cloud-sdk-go v1.61.1193 h1:C5LuIDWuQlugv30EBsSLKFF6jdtrqoVH84nYCdVYTC4=
+github.com/aliyun/alibaba-cloud-sdk-go v1.61.1193/go.mod h1:pUKYbK5JQ+1Dfxk80P0qxGqe5dkxDoabbZS7zOcouyA=
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
@@ -81,8 +83,8 @@ github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK3
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
-github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudevents/sdk-go/v2 v2.15.2 h1:54+I5xQEnI73RBhWHxbI1XJcqOFOVJN85vb41+8mHUc=
github.com/cloudevents/sdk-go/v2 v2.15.2/go.mod h1:lL7kSWAE/V8VI4Wh0jbL2v/jvqsm6tjmaQBSvxcv4uE=
@@ -143,6 +145,8 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
+github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/go-asn1-ber/asn1-ber v1.5.5/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-asn1-ber/asn1-ber v1.5.7 h1:DTX+lbVTWaTw1hQ+PbZPlnDZPEIs0SS/GCZAl535dDk=
github.com/go-asn1-ber/asn1-ber v1.5.7/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
@@ -342,8 +346,8 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4=
-github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
@@ -356,6 +360,8 @@ github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
@@ -414,9 +420,8 @@ github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c=
github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
-github.com/onsi/gomega v1.31.0 h1:54UJxxj6cPInHS3a35wm6BK/F9nHYueZ1NVujHDrnXE=
-github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
@@ -435,22 +440,22 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
-github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
-github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
+github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI=
+github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
-github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
-github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
-github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
+github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
+github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
-github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/robfig/cron v1.0.0 h1:slmQxIUH6U9ruw4XoJ7C2pyyx4yYeiHx8S9pNootHsM=
github.com/robfig/cron v1.0.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
@@ -527,8 +532,10 @@ github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAh
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
github.com/volcengine/volc-sdk-golang v1.0.23 h1:anOslb2Qp6ywnsbyq9jqR0ljuO63kg9PY+4OehIk5R8=
github.com/volcengine/volc-sdk-golang v1.0.23/go.mod h1:AfG/PZRUkHJ9inETvbjNifTDgut25Wbkm2QoYBTbvyU=
-github.com/volcengine/volcengine-go-sdk v1.0.138 h1:u1dL+Dc1kWBTrufU4LrspRdvjhkxNESWfMHR/G4Pvcg=
-github.com/volcengine/volcengine-go-sdk v1.0.138/go.mod h1:oht5AKDJsk0fY6tV2ViqaVlOO14KSRmXZlI8ikK60Tg=
+github.com/volcengine/volcengine-go-sdk v1.0.159 h1:gbyZRQpPbBwN23UeWk7DiaX3GYNge59zoC89P9RphaI=
+github.com/volcengine/volcengine-go-sdk v1.0.159/go.mod h1:oht5AKDJsk0fY6tV2ViqaVlOO14KSRmXZlI8ikK60Tg=
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
+github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
@@ -538,25 +545,25 @@ go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd
go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux v0.51.0 h1:rXpHmgy1pMXlfv3W1T5ctoDA3QeTFjNq/YwCmwrfr8Q=
go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux v0.51.0/go.mod h1:9uIRD3NZrM7QMQEGeKhr7V4xSDTMku3MPOVs8iZ3VVk=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI=
go.opentelemetry.io/otel v1.0.0/go.mod h1:AjRVh9A5/5DE7S+mZtTR6t8vpKKryam+0lREnfmS4cg=
-go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw=
-go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8=
+go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts=
+go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc=
go.opentelemetry.io/otel/exporters/jaeger v1.0.0 h1:cLhx8llHw02h5JTqGqaRbYn+QVKHmrzD9vEbKnSPk5U=
go.opentelemetry.io/otel/exporters/jaeger v1.0.0/go.mod h1:q10N1AolE1JjqKrFJK2tYw0iZpmX+HBaXBtuCzRnBGQ=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY=
-go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc=
-go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8=
+go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w=
+go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ=
go.opentelemetry.io/otel/sdk v1.0.0/go.mod h1:PCrDHlSy5x1kjezSdL37PhbFUMjrsLRshJ2zCzeXwbM=
go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI=
go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A=
go.opentelemetry.io/otel/trace v1.0.0/go.mod h1:PXTWqayeFUlJV1YDNhsJYB184+IvAH814St6o6ajzIs=
-go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4=
-go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ=
+go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc=
+go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o=
go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94=
go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
@@ -566,8 +573,9 @@ go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI=
go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
@@ -595,8 +603,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
-golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
-golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
+golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A=
+golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
@@ -647,8 +655,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
-golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
+golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -682,16 +690,16 @@ golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
-golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
+golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
-golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk=
-golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
+golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM=
+golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -701,11 +709,11 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
-golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
-golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
+golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
+golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
-golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
+golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -762,8 +770,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
-google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -800,18 +808,18 @@ helm.sh/helm/v3 v3.15.4/go.mod h1:phOwlxqGSgppCY/ysWBNRhG3MtnpsttOzxaTK+Mt40E=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-k8s.io/api v0.30.3 h1:ImHwK9DCsPA9uoU3rVh4QHAHHK5dTSv1nxJUapx8hoQ=
-k8s.io/api v0.30.3/go.mod h1:GPc8jlzoe5JG3pb0KJCSLX5oAFIW3/qNJITlDj8BH04=
+k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU=
+k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI=
-k8s.io/apimachinery v0.30.3 h1:q1laaWCmrszyQuSQCfNB8cFgCuDAoPszKY4ucAjDwHc=
-k8s.io/apimachinery v0.30.3/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
+k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U=
+k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
-k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k=
-k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U=
+k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0=
+k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg=
-k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
-k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
+k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
+k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
-k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
-k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
+k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
@@ -713,7 +713,7 @@ func (gc *GarbageCollector) markDeleteFailed(ctx job.Context, blob *blobModels.B
return errors.Wrapf(err, "failed to mark gc candidate delete failed: %s, %s", blob.Digest, blob.Status)
}
if count == 0 {
-return errors.New(nil).WithMessage("no blob found to mark delete failed, ID:%d, digest:%s", blob.ID, blob.Digest).WithCode(errors.NotFoundCode)
+return errors.New(nil).WithMessagef("no blob found to mark delete failed, ID:%d, digest:%s", blob.ID, blob.Digest).WithCode(errors.NotFoundCode)
}
return nil
}
@@ -201,24 +201,24 @@ func (sde *ScanDataExport) writeCsvFile(ctx job.Context, params job.Parameters,
return err
}

-projectIds := filterCriteria.Projects
-if len(projectIds) == 0 {
+projectIDs := filterCriteria.Projects
+if len(projectIDs) == 0 {
return nil
}

// extract the repository ids if any repositories have been specified
-repoIds, err := sde.filterProcessor.ProcessRepositoryFilter(systemContext, filterCriteria.Repositories, projectIds)
+repoIDs, err := sde.filterProcessor.ProcessRepositoryFilter(systemContext, filterCriteria.Repositories, projectIDs)
if err != nil {
return err
}

-if len(repoIds) == 0 {
+if len(repoIDs) == 0 {
logger.Infof("No repositories found with specified names: %v", filterCriteria.Repositories)
return nil
}

// filter artifacts by tags
-arts, err := sde.filterProcessor.ProcessTagFilter(systemContext, filterCriteria.Tags, repoIds)
+arts, err := sde.filterProcessor.ProcessTagFilter(systemContext, filterCriteria.Tags, repoIDs)
if err != nil {
return err
}
@@ -14,7 +14,10 @@

package logger

-import "fmt"
+import (
+"errors"
+"fmt"
+)

// Entry provides unique interfaces on top of multiple logger backends.
// Entry also implements @Interface.
@@ -115,7 +118,7 @@ func (e *Entry) Close() error {
}
}
if errMsg != "" {
-return fmt.Errorf(errMsg)
+return errors.New(errMsg)
}
return nil
}
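The fmt.Errorf(errMsg) -> errors.New(errMsg) change above is the same class of fix as the logging hunks earlier in this diff: errMsg is assembled at runtime, so it should not double as a format string. A small, self-contained illustration of the difference (the message text is invented):

package main

import (
	"errors"
	"fmt"
)

func main() {
	// Assembled at runtime; happens to contain a '%' character.
	errMsg := "close logger failed: disk 80% full"

	e1 := fmt.Errorf(errMsg) // "% f" is parsed as a verb: "disk 80%!f(MISSING)ull"
	e2 := errors.New(errMsg) // message kept verbatim

	fmt.Println(e1)
	fmt.Println(e2)
}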
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.43.2. DO NOT EDIT.
+// Code generated by mockery v2.46.2. DO NOT EDIT.

package mgt

@@ -390,7 +390,7 @@ func toInt(v interface{}) int64 {

func hashKey(p *period.Policy) string {
key := p.JobName
-if p.JobParameters != nil && len(p.JobParameters) > 0 {
+if len(p.JobParameters) > 0 {
if bytes, err := json.Marshal(p.JobParameters); err == nil {
key = fmt.Sprintf("%s:%s", key, string(bytes))
}
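The simplified condition in hashKey leans on the fact that len of a nil map is defined to be 0 in Go, so the explicit nil guard was redundant (linters such as staticcheck flag this pattern). A quick demonstration:

package main

import "fmt"

func main() {
	var params map[string]interface{} // nil map, as p.JobParameters may be

	fmt.Println(params == nil)   // true
	fmt.Println(len(params))     // 0
	fmt.Println(len(params) > 0) // false - same result as the old two-part check
}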
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.43.2. DO NOT EDIT.
+// Code generated by mockery v2.46.2. DO NOT EDIT.

package period

@@ -154,7 +154,7 @@ func (w *basicWorker) Start() error {
logger.Infof("Basic worker is started")

// Start the reaper
-w.knownJobs.Range(func(k interface{}, v interface{}) bool {
+w.knownJobs.Range(func(k interface{}, _ interface{}) bool {
w.reaper.jobTypes = append(w.reaper.jobTypes, k.(string))

return true
@@ -136,7 +136,7 @@ func (r *reaper) syncOutdatedStats() error {

// Loop all the in progress jobs to check if they're really in progress or
// status is hung.
-h := func(k string, v int64) (err error) {
+h := func(k string, _ int64) (err error) {
defer func() {
if errs.IsObjectNotFoundError(err) {
// As the job stats is lost and we don't have chance to restore it, then directly discard it.
2 src/lib/cache/mock_cache_test.go (vendored)
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.43.2. DO NOT EDIT.
+// Code generated by mockery v2.46.2. DO NOT EDIT.

package cache

@ -35,10 +35,10 @@ func ValidateHTTPURL(s string) (string, error) {
|
|||||||
}
|
}
|
||||||
url, err := url.Parse(s)
|
url, err := url.Parse(s)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", errors.New(nil).WithCode(errors.BadRequestCode).WithMessage("invalid URL: %s", err.Error())
|
return "", errors.New(nil).WithCode(errors.BadRequestCode).WithMessagef("invalid URL: %s", err.Error())
|
||||||
}
|
}
|
||||||
if url.Scheme != "http" && url.Scheme != "https" {
|
if url.Scheme != "http" && url.Scheme != "https" {
|
||||||
return "", errors.New(nil).WithCode(errors.BadRequestCode).WithMessage("invalid HTTP scheme: %s", url.Scheme)
|
return "", errors.New(nil).WithCode(errors.BadRequestCode).WithMessagef("invalid HTTP scheme: %s", url.Scheme)
|
||||||
}
|
}
|
||||||
// To avoid SSRF security issue, refer to #3755 for more detail
|
// To avoid SSRF security issue, refer to #3755 for more detail
|
||||||
return fmt.Sprintf("%s://%s%s", url.Scheme, url.Host, url.Path), nil
|
return fmt.Sprintf("%s://%s%s", url.Scheme, url.Host, url.Path), nil
|
||||||
@@ -63,12 +63,18 @@ func (e *Error) MarshalJSON() ([]byte, error) {
 })
 }

-// WithMessage ...
-func (e *Error) WithMessage(format string, v ...interface{}) *Error {
+// WithMessagef ...
+func (e *Error) WithMessagef(format string, v ...interface{}) *Error {
 e.Message = fmt.Sprintf(format, v...)
 return e
 }

+// WithMessage ...
+func (e *Error) WithMessage(message string) *Error {
+e.Message = message
+return e
+}
+
 // WithCode ...
 func (e *Error) WithCode(code string) *Error {
 e.Code = code
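The hunk above splits Harbor's error-message helpers in two: WithMessagef keeps the printf-style formatting, while the new WithMessage stores a plain string so stray format verbs are never expanded. A hedged usage sketch, assuming the src/lib/errors package shown in these hunks under its usual Harbor import path:

    package main

    import (
        "fmt"

        "github.com/goharbor/harbor/src/lib/errors"
    )

    func main() {
        rawURL := "ht!tp://bad"
        // printf-style variant, now named WithMessagef
        e1 := errors.New(nil).WithCode(errors.BadRequestCode).WithMessagef("invalid URL: %s", rawURL)
        // plain-string variant; no format verbs are interpreted
        e2 := errors.New(nil).WithCode(errors.NotFoundCode).WithMessage("artifact not found")
        fmt.Println(e1, e2)
    }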
@@ -53,7 +53,7 @@ func AsNotFoundError(err error, messageFormat string, args ...interface{}) *erro
 if errors.Is(err, orm.ErrNoRows) {
 e := errors.NotFoundError(nil)
 if len(messageFormat) > 0 {
-_ = e.WithMessage(messageFormat, args...)
+_ = e.WithMessagef(messageFormat, args...)
 }
 return e
 }

@@ -66,7 +66,7 @@ func AsConflictError(err error, messageFormat string, args ...interface{}) *erro
 if IsDuplicateKeyError(err) {
 e := errors.New(err).
 WithCode(errors.ConflictCode).
-WithMessage(messageFormat, args...)
+WithMessagef(messageFormat, args...)
 return e
 }
 return nil

@@ -78,7 +78,7 @@ func AsForeignKeyError(err error, messageFormat string, args ...interface{}) *er
 if isViolatingForeignKeyConstraintError(err) {
 e := errors.New(err).
 WithCode(errors.ViolateForeignKeyConstraintCode).
-WithMessage(messageFormat, args...)
+WithMessagef(messageFormat, args...)
 return e
 }
 return nil

@@ -70,7 +70,7 @@ func parseKeywords(q string) (map[string]interface{}, error) {
 if err != nil {
 return nil, errors.New(err).
 WithCode(errors.BadRequestCode).
-WithMessage("invalid query string value: %s", strs[1])
+WithMessagef("invalid query string value: %s", strs[1])
 }
 keywords[strs[0]] = value
 }
@@ -84,7 +84,7 @@ func GetRedisPool(name string, rawurl string, param *PoolParam) (*redis.Pool, er
 Dial: func() (redis.Conn, error) {
 return redis.DialURL(rawurl)
 },
-TestOnBorrow: func(c redis.Conn, t time.Time) error {
+TestOnBorrow: func(c redis.Conn, _ time.Time) error {
 _, err := c.Do("PING")
 return err
 },
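The TestOnBorrow tweak above only renames an unused parameter; the hook itself is the standard redigo health check run when a connection is taken from the pool. A small sketch with the gomodule/redigo API (the connection URL is a placeholder):

    package main

    import (
        "time"

        "github.com/gomodule/redigo/redis"
    )

    func newPool() *redis.Pool {
        return &redis.Pool{
            Dial: func() (redis.Conn, error) {
                return redis.DialURL("redis://localhost:6379")
            },
            // the borrow timestamp is unused, hence `_ time.Time` as in the fix above
            TestOnBorrow: func(c redis.Conn, _ time.Time) error {
                _, err := c.Do("PING")
                return err
            },
        }
    }

    func main() {
        p := newPool()
        defer p.Close()
    }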
@@ -172,7 +172,7 @@ func getSentinelPool(u *url.URL, param *PoolParam, name string) (*redis.Pool, er
 log.Debug(name, "dial redis master:", masterAddr, "db:", db)
 return redis.Dial("tcp", masterAddr, redisOptions...)
 },
-TestOnBorrow: func(c redis.Conn, t time.Time) error {
+TestOnBorrow: func(c redis.Conn, _ time.Time) error {
 if !sentinel.TestRole(c, "master") {
 return fmt.Errorf("check role failed, %s", name)
 }

@@ -94,7 +94,7 @@ func Get(kind, decoration, pattern, extras string) (selector.Selector, error) {
 func Index() []*IndexedMeta {
 all := make([]*IndexedMeta, 0)

-index.Range(func(k, v interface{}) bool {
+index.Range(func(_, v interface{}) bool {
 if item, ok := v.(*indexedItem); ok {
 all = append(all, item.Meta)
 return true
@@ -132,7 +132,7 @@ func (d *dao) Delete(ctx context.Context, id int64) error {
 return err
 }
 if n == 0 {
-return errors.NotFoundError(nil).WithMessage("accessory %d not found", id)
+return errors.NotFoundError(nil).WithMessagef("accessory %d not found", id)
 }
 return nil
 }

@@ -146,7 +146,7 @@ func (d *dao) GetByDigest(ctx context.Context, repository, digest string) (*Arti
 }
 if len(artifacts) == 0 {
 return nil, errors.New(nil).WithCode(errors.NotFoundCode).
-WithMessage("artifact %s@%s not found", repository, digest)
+WithMessagef("artifact %s@%s not found", repository, digest)
 }
 return artifacts[0], nil
 }

@@ -181,7 +181,7 @@ func (d *dao) Delete(ctx context.Context, id int64) error {
 return err
 }
 if n == 0 {
-return errors.NotFoundError(nil).WithMessage("artifact %d not found", id)
+return errors.NotFoundError(nil).WithMessagef("artifact %d not found", id)
 }

 return nil

@@ -197,7 +197,7 @@ func (d *dao) Update(ctx context.Context, artifact *Artifact, props ...string) e
 return err
 }
 if n == 0 {
-return errors.NotFoundError(nil).WithMessage("artifact %d not found", artifact.ID)
+return errors.NotFoundError(nil).WithMessagef("artifact %d not found", artifact.ID)
 }
 return nil
 }

@@ -261,7 +261,7 @@ func (d *dao) DeleteReference(ctx context.Context, id int64) error {
 return err
 }
 if n == 0 {
-return errors.NotFoundError(nil).WithMessage("artifact reference %d not found", id)
+return errors.NotFoundError(nil).WithMessagef("artifact reference %d not found", id)
 }
 return nil
 }

@@ -73,7 +73,7 @@ func (d *dao) Delete(ctx context.Context, id int64) (err error) {
 return err
 }
 if n == 0 {
-return errors.NotFoundError(nil).WithMessage("artifact trash %d not found", id)
+return errors.NotFoundError(nil).WithMessagef("artifact trash %d not found", id)
 }
 return nil
 }

@@ -207,7 +207,7 @@ func (d *dao) Delete(ctx context.Context, id int64) error {
 return err
 }
 if n == 0 {
-return errors.NotFoundError(nil).WithMessage("access %d not found", id)
+return errors.NotFoundError(nil).WithMessagef("access %d not found", id)
 }
 return nil
 }
@@ -385,7 +385,7 @@ func (d *dao) DeleteBlob(ctx context.Context, id int64) error {
 return err
 }
 if n == 0 {
-return errors.NotFoundError(nil).WithMessage("blob %d not found", id)
+return errors.NotFoundError(nil).WithMessagef("blob %d not found", id)
 }
 return nil
 }

@@ -140,7 +140,7 @@ func (m *manager) Update(ctx context.Context, blob *Blob) error {
 func (m *manager) UpdateBlobStatus(ctx context.Context, blob *models.Blob) (int64, error) {
 _, exist := models.StatusMap[blob.Status]
 if !exist {
-return -1, errors.New(nil).WithMessage("cannot update blob status, as the status is unknown. digest: %s, status: %s", blob.Digest, blob.Status)
+return -1, errors.New(nil).WithMessagef("cannot update blob status, as the status is unknown. digest: %s, status: %s", blob.Digest, blob.Status)
 }
 return m.dao.UpdateBlobStatus(ctx, blob)
 }

@@ -87,7 +87,7 @@ func (e *Exporter) RegisterCollector(collectors ...collector) error {
 func newServer(opt *Opt, _ *prometheus.Registry) *http.Server {
 exporterMux := http.NewServeMux()
 exporterMux.Handle(opt.MetricsPath, promhttp.Handler())
-exporterMux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+exporterMux.HandleFunc("/", func(w http.ResponseWriter, _ *http.Request) {
 _, _ = w.Write([]byte(`<html>
 <head><title>Harbor Exporter</title></head>
 <body>
@@ -69,7 +69,7 @@ func (i *iDao) UpdateImmutableRule(ctx context.Context, projectID int64, ir *mod
 return err
 }
 if n == 0 {
-return errors.NotFoundError(nil).WithMessage("immutable %d not found", ir.ID)
+return errors.NotFoundError(nil).WithMessagef("immutable %d not found", ir.ID)
 }
 return nil
 }

@@ -86,7 +86,7 @@ func (i *iDao) ToggleImmutableRule(ctx context.Context, id int64, status bool) e
 return err
 }
 if n == 0 {
-return errors.NotFoundError(nil).WithMessage("immutable %d not found", ir.ID)
+return errors.NotFoundError(nil).WithMessagef("immutable %d not found", ir.ID)
 }
 return nil
 }

@@ -142,7 +142,7 @@ func (i *iDao) DeleteImmutableRule(ctx context.Context, id int64) error {
 return err
 }
 if n == 0 {
-return errors.NotFoundError(nil).WithMessage("immutable rule %d not found", id)
+return errors.NotFoundError(nil).WithMessagef("immutable rule %d not found", id)
 }
 return nil
 }

@@ -123,7 +123,7 @@ func (d *defaultDAO) Delete(ctx context.Context, id int64) error {
 return err
 }
 if n == 0 {
-return errors.NotFoundError(nil).WithMessage("label %d not found", id)
+return errors.NotFoundError(nil).WithMessagef("label %d not found", id)
 }
 return nil
 }

@@ -184,7 +184,7 @@ func (d *defaultDAO) DeleteReference(ctx context.Context, id int64) error {
 return err
 }
 if n == 0 {
-return errors.NotFoundError(nil).WithMessage("label reference %d not found", id)
+return errors.NotFoundError(nil).WithMessagef("label reference %d not found", id)
 }
 return nil
 }

@@ -114,7 +114,7 @@ func (m *manager) RemoveFrom(ctx context.Context, labelID int64, artifactID int6
 return err
 }
 if n == 0 {
-return errors.NotFoundError(nil).WithMessage("reference with label %d and artifact %d not found", labelID, artifactID)
+return errors.NotFoundError(nil).WithMessagef("reference with label %d and artifact %d not found", labelID, artifactID)
 }
 return nil
 }
@@ -52,9 +52,9 @@ func (l *Label) Valid() error {
 }

 if l.Scope != common.LabelScopeGlobal && l.Scope != common.LabelScopeProject {
-return errors.New(nil).WithMessage("invalid: %s", l.Scope).WithCode(errors.BadRequestCode)
+return errors.New(nil).WithMessagef("invalid: %s", l.Scope).WithCode(errors.BadRequestCode)
 } else if l.Scope == common.LabelScopeProject && l.ProjectID <= 0 {
-return errors.New(nil).WithMessage("invalid: %d", l.ProjectID).WithCode(errors.BadRequestCode)
+return errors.New(nil).WithMessagef("invalid: %d", l.ProjectID).WithCode(errors.BadRequestCode)
 }
 return nil
 }

@@ -67,7 +67,7 @@ func (m *manager) Get(ctx context.Context, projectID int64, memberID int) (*mode
 }
 if len(pm) == 0 {
 return nil, errors.NotFoundError(nil).
-WithMessage("the project member is not found, project id %v, member id %v", projectID, memberID)
+WithMessagef("the project member is not found, project id %v, member id %v", projectID, memberID)
 }
 return pm[0], nil
 }

@@ -150,7 +150,7 @@ func (d *dao) Delete(ctx context.Context, id int64) error {
 return err
 }
 if n == 0 {
-return errors.NotFoundError(nil).WithMessage("notificationPolicy %d not found", id)
+return errors.NotFoundError(nil).WithMessagef("notificationPolicy %d not found", id)
 }
 return nil
 }

@@ -80,7 +80,7 @@ func (md *metaDAO) Update(ctx context.Context, oidcUser *models.OIDCUser, props
 return err
 }
 if n == 0 {
-return errors.NotFoundError(nil).WithMessage("oidc user data with id %d not found", oidcUser.ID)
+return errors.NotFoundError(nil).WithMessagef("oidc user data with id %d not found", oidcUser.ID)
 }
 return nil
 }

@@ -60,7 +60,7 @@ func (m *metaManager) GetBySubIss(ctx context.Context, sub, iss string) (*models
 return nil, err
 }
 if len(l) == 0 {
-return nil, errors.NotFoundError(nil).WithMessage("oidc info for user with issuer %s, subject %s not found", iss, sub)
+return nil, errors.NotFoundError(nil).WithMessagef("oidc info for user with issuer %s, subject %s not found", iss, sub)
 }
 if len(l) > 1 {
 logger.Warningf("Multiple oidc info records found for issuer %s, subject %s", iss, sub)

@@ -79,7 +79,7 @@ func (m *metaManager) GetByUserID(ctx context.Context, uid int) (*models.OIDCUse
 return nil, err
 }
 if len(l) == 0 {
-return nil, errors.NotFoundError(nil).WithMessage("oidc info for user %d not found", uid)
+return nil, errors.NotFoundError(nil).WithMessagef("oidc info for user %d not found", uid)
 }
 if len(l) > 1 {
 logger.Warningf("%d records of oidc user Info found for user %d", len(l), uid)
@@ -91,7 +91,7 @@ func (d *dao) Update(ctx context.Context, schema *policy.Schema, props ...string
 }

 if id == 0 {
-return errors.NotFoundError(nil).WithMessage("policy %d not found", schema.ID)
+return errors.NotFoundError(nil).WithMessagef("policy %d not found", schema.ID)
 }

 return nil

@@ -148,7 +148,7 @@ func (d *dao) Delete(ctx context.Context, id int64) (err error) {
 }

 if n == 0 {
-return errors.NotFoundError(nil).WithMessage("policy %d not found", id)
+return errors.NotFoundError(nil).WithMessagef("policy %d not found", id)
 }

 return nil

@@ -132,13 +132,13 @@ func (s *Schema) ValidatePreheatPolicy() error {
 if s.Trigger != nil && s.Trigger.Type == TriggerTypeScheduled && len(s.Trigger.Settings.Cron) > 0 {
 if err := utils.ValidateCronString(s.Trigger.Settings.Cron); err != nil {
 return errors.New(nil).WithCode(errors.BadRequestCode).
-WithMessage("invalid cron string for scheduled preheat: %s, error: %v", s.Trigger.Settings.Cron, err)
+WithMessagef("invalid cron string for scheduled preheat: %s, error: %v", s.Trigger.Settings.Cron, err)
 }
 }

 // validate preheat scope
 if s.Scope != "" && s.Scope != ScopeTypeSinglePeer && s.Scope != ScopeTypeAllPeers {
-return errors.New(nil).WithCode(errors.BadRequestCode).WithMessage("invalid scope for preheat policy: %s", s.Scope)
+return errors.New(nil).WithCode(errors.BadRequestCode).WithMessagef("invalid scope for preheat policy: %s", s.Scope)
 }

 return nil
@@ -84,7 +84,7 @@ func NewHTTPClient(insecure bool) *HTTPClient {
 // Get content from the url
 func (hc *HTTPClient) Get(url string, cred *auth.Credential, parmas map[string]string, options map[string]string) ([]byte, error) {
 bytes, err := hc.get(url, cred, parmas, options)
-logMsg := fmt.Sprintf("Get %s with cred=%v, params=%v, options=%v", url, cred, parmas, options)
+logMsg := fmt.Sprintf("Get %s with params=%v, options=%v", url, parmas, options)
 if err != nil {
 log.Errorf("%s: %s", logMsg, err)
 } else {

@@ -162,7 +162,7 @@ func (dd *DragonflyDriver) Self() *Metadata {
 ID: "dragonfly",
 Name: "Dragonfly",
 Icon: "https://raw.githubusercontent.com/dragonflyoss/Dragonfly2/master/docs/images/logo/dragonfly-linear.png",
-Version: "2.1.57",
+Version: "2.1.59",
 Source: "https://github.com/dragonflyoss/Dragonfly2",
 Maintainers: []string{"chlins.zhang@gmail.com", "gaius.qi@gmail.com"},
 }

@@ -40,7 +40,7 @@ func (s *regexpStore) Get(key string, build func(string) *regexp.Regexp) *regexp

 func (s *regexpStore) Purge() {
 var keys []interface{}
-s.entries.Range(func(key, value interface{}) bool {
+s.entries.Range(func(key, _ interface{}) bool {
 keys = append(keys, key)
 return true
 })
@@ -71,12 +71,12 @@ type manager struct {
 // Create creates project instance
 func (m *manager) Create(ctx context.Context, project *models.Project) (int64, error) {
 if project.OwnerID <= 0 {
-return 0, errors.BadRequestError(nil).WithMessage("Owner is missing when creating project %s", project.Name)
+return 0, errors.BadRequestError(nil).WithMessagef("Owner is missing when creating project %s", project.Name)
 }

 if utils.IsIllegalLength(project.Name, projectNameMinLen, projectNameMaxLen) {
 format := "Project name %s is illegal in length. (greater than %d or less than %d)"
-return 0, errors.BadRequestError(nil).WithMessage(format, project.Name, projectNameMaxLen, projectNameMinLen)
+return 0, errors.BadRequestError(nil).WithMessagef(format, project.Name, projectNameMaxLen, projectNameMinLen)
 }

 legal := validProjectName.MatchString(project.Name)

@@ -107,7 +107,7 @@ func (m *manager) Get(ctx context.Context, idOrName interface{}) (*models.Projec
 if ok {
 // check white space in project name
 if strings.Contains(name, " ") {
-return nil, errors.BadRequestError(nil).WithMessage("invalid project name: '%s'", name)
+return nil, errors.BadRequestError(nil).WithMessagef("invalid project name: '%s'", name)
 }
 return m.dao.GetByName(ctx, name)
 }

@@ -74,7 +74,7 @@ func (d *dao) DeletePermission(ctx context.Context, id int64) (err error) {
 return err
 }
 if n == 0 {
-return errors.NotFoundError(nil).WithMessage("role permission %d not found", id)
+return errors.NotFoundError(nil).WithMessagef("role permission %d not found", id)
 }
 return nil
 }

@@ -106,7 +106,7 @@ func (d *dao) DeletePermissionsByRole(ctx context.Context, roleType string, role
 return err
 }
 if n == 0 {
-return errors.NotFoundError(nil).WithMessage("role permission %s:%d not found", roleType, roleID)
+return errors.NotFoundError(nil).WithMessagef("role permission %s:%d not found", roleType, roleID)
 }
 return err
 }

@@ -132,7 +132,7 @@ func (d *dao) DeleteRbacPolicy(ctx context.Context, id int64) (err error) {
 return err
 }
 if n == 0 {
-return errors.NotFoundError(nil).WithMessage("rbac policy %d not found", id)
+return errors.NotFoundError(nil).WithMessagef("rbac policy %d not found", id)
 }
 return nil
 }
@@ -15,16 +15,12 @@
 package aliacr

 import (
-"encoding/json"
 "errors"
 "fmt"
 "path/filepath"
 "regexp"
 "strings"

-"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
-"github.com/aliyun/alibaba-cloud-sdk-go/services/cr"
-
 commonhttp "github.com/goharbor/harbor/src/common/http"
 "github.com/goharbor/harbor/src/common/utils"
 "github.com/goharbor/harbor/src/lib/log"
@ -45,48 +41,80 @@ func init() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// example:
|
// example:
|
||||||
// https://registry.%s.aliyuncs.com
|
|
||||||
// https://cr.%s.aliyuncs.com
|
// https://cr.%s.aliyuncs.com
|
||||||
// https://registry-vpc.%s.aliyuncs.com
|
var regACRServiceURL = regexp.MustCompile(`https://cr\.([\w\-]+)\.aliyuncs\.com`)
|
||||||
// https://registry-internal.%s.aliyuncs.com
|
|
||||||
var regRegion = regexp.MustCompile(`https://(registry|cr|registry-vpc|registry-internal)\.([\w\-]+)\.aliyuncs\.com`)
|
|
||||||
|
|
||||||
func getRegion(url string) (region string, err error) {
|
func getRegistryURL(url string) (string, error) {
|
||||||
if url == "" {
|
if url == "" {
|
||||||
return "", errors.New("empty url")
|
return "", errors.New("empty url")
|
||||||
}
|
}
|
||||||
rs := regRegion.FindStringSubmatch(url)
|
rs := regACRServiceURL.FindStringSubmatch(url)
|
||||||
if rs == nil {
|
if rs == nil {
|
||||||
return "", errors.New("invalid Rgistry|CR service url")
|
return url, nil
|
||||||
}
|
}
|
||||||
// fmt.Println(rs)
|
return fmt.Sprintf(registryEndpointTpl, rs[1]), nil
|
||||||
return rs[2], nil
|
}
|
||||||
|
|
||||||
|
// example:
|
||||||
|
// registry.aliyuncs.com:cn-hangzhou:china:cri-xxxxxxxxx
|
||||||
|
// registry.aliyuncs.com:cn-hangzhou:26842
|
||||||
|
func parseRegistryService(service string) (*registryServiceInfo, error) {
|
||||||
|
parts := strings.Split(service, ":")
|
||||||
|
length := len(parts)
|
||||||
|
if length < 2 {
|
||||||
|
return nil, errors.New("invalid service format: expected 'registry.aliyuncs.com:region:xxxxx'")
|
||||||
|
}
|
||||||
|
|
||||||
|
if !strings.EqualFold(parts[0], registryACRService) {
|
||||||
|
return nil, errors.New("not a acr service")
|
||||||
|
}
|
||||||
|
|
||||||
|
if strings.HasPrefix(parts[length-1], "cri-") {
|
||||||
|
return ®istryServiceInfo{
|
||||||
|
IsACREE: true,
|
||||||
|
RegionID: parts[1],
|
||||||
|
InstanceID: parts[length-1],
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
return ®istryServiceInfo{
|
||||||
|
IsACREE: false,
|
||||||
|
RegionID: parts[1],
|
||||||
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func newAdapter(registry *model.Registry) (*adapter, error) {
|
func newAdapter(registry *model.Registry) (*adapter, error) {
|
||||||
region, err := getRegion(registry.URL)
|
url, err := getRegistryURL(registry.URL)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
switch true {
|
registry.URL = url
|
||||||
case strings.Contains(registry.URL, "registry-vpc"):
|
|
||||||
registry.URL = fmt.Sprintf(registryVPCEndpointTpl, region)
|
|
||||||
case strings.Contains(registry.URL, "registry-internal"):
|
|
||||||
registry.URL = fmt.Sprintf(registryInternalEndpointTpl, region)
|
|
||||||
default:
|
|
||||||
// fix url (allow user input cr service url)
|
|
||||||
registry.URL = fmt.Sprintf(registryEndpointTpl, region)
|
|
||||||
}
|
|
||||||
realm, service, err := util.Ping(registry)
|
realm, service, err := util.Ping(registry)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
credential := NewAuth(region, registry.Credential.AccessKey, registry.Credential.AccessSecret)
|
|
||||||
authorizer := bearer.NewAuthorizer(realm, service, credential, commonhttp.GetHTTPTransport(commonhttp.WithInsecure(registry.Insecure)))
|
info, err := parseRegistryService(service)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var acrAPI openapi
|
||||||
|
if !info.IsACREE {
|
||||||
|
acrAPI, err = newAcrOpenapi(registry.Credential.AccessKey, registry.Credential.AccessSecret, info.RegionID)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
acrAPI, err = newAcreeOpenapi(registry.Credential.AccessKey, registry.Credential.AccessSecret, info.RegionID, info.InstanceID)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
authorizer := bearer.NewAuthorizer(realm, service, NewAuth(acrAPI), commonhttp.GetHTTPTransport(commonhttp.WithInsecure(registry.Insecure)))
|
||||||
return &adapter{
|
return &adapter{
|
||||||
region: region,
|
acrAPI: acrAPI,
|
||||||
registry: registry,
|
registry: registry,
|
||||||
domain: fmt.Sprintf(endpointTpl, region),
|
|
||||||
Adapter: native.NewAdapterWithAuthorizer(registry, authorizer),
|
Adapter: native.NewAdapterWithAuthorizer(registry, authorizer),
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
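Taken together, the new helpers first normalize the user-supplied endpoint and then decide, from the token service name returned by util.Ping, whether the target is a shared ACR region or an ACR EE instance. A sketch of the expected behavior, usable only inside the aliacr package since both helpers are unexported; the values mirror the test cases later in this diff:

    func exampleServiceParsing() {
        // getRegistryURL rewrites a cr.<region> service URL to the registry endpoint.
        u, _ := getRegistryURL("https://cr.cn-hangzhou.aliyuncs.com")
        fmt.Println(u) // https://registry.cn-hangzhou.aliyuncs.com

        // parseRegistryService detects an ACR EE instance from the "cri-" suffix.
        info, _ := parseRegistryService("registry.aliyuncs.com:cn-hangzhou:china:cri-xxxxxxxxx")
        fmt.Println(info.IsACREE, info.RegionID, info.InstanceID) // true cn-hangzhou cri-xxxxxxxxx
    }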
@@ -112,16 +140,15 @@ var (
 // adapter for to aliyun docker registry
 type adapter struct {
 *native.Adapter
-region string
-domain string
+acrAPI openapi
 registry *model.Registry
 }

 var _ adp.Adapter = &adapter{}

 // Info ...
-func (a *adapter) Info() (info *model.RegistryInfo, err error) {
-info = &model.RegistryInfo{
+func (a *adapter) Info() (*model.RegistryInfo, error) {
+info := &model.RegistryInfo{
 Type: model.RegistryTypeAliAcr,
 SupportedResourceTypes: []string{
 model.ResourceTypeImage,

@@ -141,7 +168,7 @@ func (a *adapter) Info() (info *model.RegistryInfo, err error) {
 model.TriggerTypeScheduled,
 },
 }
-return
+return info, nil
 }

 func getAdapterInfo() *model.AdapterPattern {
@ -184,6 +211,16 @@ func getAdapterInfo() *model.AdapterPattern {
|
|||||||
Key: e + "-internal",
|
Key: e + "-internal",
|
||||||
Value: fmt.Sprintf("https://registry-internal.%s.aliyuncs.com", e),
|
Value: fmt.Sprintf("https://registry-internal.%s.aliyuncs.com", e),
|
||||||
})
|
})
|
||||||
|
|
||||||
|
endpoints = append(endpoints, &model.Endpoint{
|
||||||
|
Key: e + "-ee-vpc",
|
||||||
|
Value: fmt.Sprintf("https://instanceName-registry-vpc.%s.cr.aliyuncs.com", e),
|
||||||
|
})
|
||||||
|
|
||||||
|
endpoints = append(endpoints, &model.Endpoint{
|
||||||
|
Key: e + "-ee",
|
||||||
|
Value: fmt.Sprintf("https://instanceName-registry.%s.cr.aliyuncs.com", e),
|
||||||
|
})
|
||||||
}
|
}
|
||||||
info := &model.AdapterPattern{
|
info := &model.AdapterPattern{
|
||||||
EndpointPattern: &model.EndpointPattern{
|
EndpointPattern: &model.EndpointPattern{
|
||||||
@ -194,30 +231,8 @@ func getAdapterInfo() *model.AdapterPattern {
|
|||||||
return info
|
return info
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *adapter) listNamespaces(c *cr.Client) (namespaces []string, err error) {
|
func (a *adapter) listCandidateNamespaces(namespacePattern string) ([]string, error) {
|
||||||
// list namespaces
|
var namespaces []string
|
||||||
var nsReq = cr.CreateGetNamespaceListRequest()
|
|
||||||
var nsResp *cr.GetNamespaceListResponse
|
|
||||||
nsReq.SetDomain(a.domain)
|
|
||||||
nsResp, err = c.GetNamespaceList(nsReq)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
var resp = &aliACRNamespaceResp{}
|
|
||||||
err = json.Unmarshal(nsResp.GetHttpContentBytes(), resp)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for _, ns := range resp.Data.Namespaces {
|
|
||||||
namespaces = append(namespaces, ns.Namespace)
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Debugf("FetchArtifacts.listNamespaces: %#v\n", namespaces)
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *adapter) listCandidateNamespaces(c *cr.Client, namespacePattern string) (namespaces []string, err error) {
|
|
||||||
if len(namespacePattern) > 0 {
|
if len(namespacePattern) > 0 {
|
||||||
if nms, ok := util.IsSpecificPathComponent(namespacePattern); ok {
|
if nms, ok := util.IsSpecificPathComponent(namespacePattern); ok {
|
||||||
namespaces = append(namespaces, nms...)
|
namespaces = append(namespaces, nms...)
|
||||||
@ -228,19 +243,22 @@ func (a *adapter) listCandidateNamespaces(c *cr.Client, namespacePattern string)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return a.listNamespaces(c)
|
if a.acrAPI == nil {
|
||||||
|
return nil, errors.New("acr api is nil")
|
||||||
|
}
|
||||||
|
|
||||||
|
return a.acrAPI.ListNamespace()
|
||||||
}
|
}
|
||||||
|
|
||||||
// FetchArtifacts AliACR not support /v2/_catalog of Registry, we'll list all resources via Aliyun's API
|
// FetchArtifacts AliACR not support /v2/_catalog of Registry, we'll list all resources via Aliyun's API
|
||||||
func (a *adapter) FetchArtifacts(filters []*model.Filter) (resources []*model.Resource, err error) {
|
func (a *adapter) FetchArtifacts(filters []*model.Filter) ([]*model.Resource, error) {
|
||||||
log.Debugf("FetchArtifacts.filters: %#v\n", filters)
|
log.Debugf("FetchArtifacts.filters: %#v\n", filters)
|
||||||
|
|
||||||
var client *cr.Client
|
if a.acrAPI == nil {
|
||||||
client, err = cr.NewClientWithAccessKey(a.region, a.registry.Credential.AccessKey, a.registry.Credential.AccessSecret)
|
return nil, errors.New("acr api is nil")
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var resources []*model.Resource
|
||||||
// get filter pattern
|
// get filter pattern
|
||||||
var repoPattern string
|
var repoPattern string
|
||||||
var tagsPattern string
|
var tagsPattern string
|
||||||
@ -254,31 +272,29 @@ func (a *adapter) FetchArtifacts(filters []*model.Filter) (resources []*model.Re
|
|||||||
log.Debugf("\nrepoPattern=%s tagsPattern=%s\n\n", repoPattern, tagsPattern)
|
log.Debugf("\nrepoPattern=%s tagsPattern=%s\n\n", repoPattern, tagsPattern)
|
||||||
|
|
||||||
// get namespaces
|
// get namespaces
|
||||||
var namespaces []string
|
namespaces, err := a.listCandidateNamespaces(namespacePattern)
|
||||||
namespaces, err = a.listCandidateNamespaces(client, namespacePattern)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return nil, err
|
||||||
}
|
}
|
||||||
log.Debugf("got namespaces: %v \n", namespaces)
|
log.Debugf("got namespaces: %v \n", namespaces)
|
||||||
|
|
||||||
// list repos
|
// list repos
|
||||||
var repositories []aliRepo
|
var repositories []*repository
|
||||||
for _, namespace := range namespaces {
|
for _, namespace := range namespaces {
|
||||||
var repos []aliRepo
|
repos, err := a.acrAPI.ListRepository(namespace)
|
||||||
repos, err = a.listReposByNamespace(a.region, namespace, client)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Debugf("\nnamespace: %s \t repositories: %#v\n\n", namespace, repos)
|
log.Debugf("\nnamespace: %s \t repositories: %#v\n\n", namespace, repos)
|
||||||
|
|
||||||
for _, repo := range repos {
|
for _, repo := range repos {
|
||||||
var ok bool
|
var ok bool
|
||||||
var repoName = filepath.Join(repo.RepoNamespace, repo.RepoName)
|
var repoName = filepath.Join(repo.Namespace, repo.Name)
|
||||||
ok, err = util.Match(repoPattern, repoName)
|
ok, err = util.Match(repoPattern, repoName)
|
||||||
log.Debugf("\n Repository: %s\t repoPattern: %s\t Match: %v\n", repoName, repoPattern, ok)
|
log.Debugf("\n Repository: %s\t repoPattern: %s\t Match: %v\n", repoName, repoPattern, ok)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return nil, err
|
||||||
}
|
}
|
||||||
if ok {
|
if ok {
|
||||||
repositories = append(repositories, repo)
|
repositories = append(repositories, repo)
|
||||||
@ -295,9 +311,9 @@ func (a *adapter) FetchArtifacts(filters []*model.Filter) (resources []*model.Re
|
|||||||
repo := r
|
repo := r
|
||||||
runner.AddTask(func() error {
|
runner.AddTask(func() error {
|
||||||
var tags []string
|
var tags []string
|
||||||
tags, err = a.getTags(repo, client)
|
tags, err = a.acrAPI.ListRepoTag(repo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("list tags for repo '%s' error: %v", repo.RepoName, err)
|
return fmt.Errorf("list tags for repo '%s' error: %v", repo.Name, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
var artifacts []*model.Artifact
|
var artifacts []*model.Artifact
|
||||||
@ -317,13 +333,12 @@ func (a *adapter) FetchArtifacts(filters []*model.Filter) (resources []*model.Re
|
|||||||
Registry: a.registry,
|
Registry: a.registry,
|
||||||
Metadata: &model.ResourceMetadata{
|
Metadata: &model.ResourceMetadata{
|
||||||
Repository: &model.Repository{
|
Repository: &model.Repository{
|
||||||
Name: filepath.Join(repo.RepoNamespace, repo.RepoName),
|
Name: filepath.Join(repo.Namespace, repo.Name),
|
||||||
},
|
},
|
||||||
Artifacts: filterArtifacts,
|
Artifacts: filterArtifacts,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@ -336,65 +351,5 @@ func (a *adapter) FetchArtifacts(filters []*model.Filter) (resources []*model.Re
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return
|
return resources, nil
|
||||||
}
|
|
||||||
|
|
||||||
func (a *adapter) listReposByNamespace(_ string, namespace string, c *cr.Client) (repos []aliRepo, err error) {
|
|
||||||
var reposReq = cr.CreateGetRepoListByNamespaceRequest()
|
|
||||||
var reposResp = cr.CreateGetRepoListByNamespaceResponse()
|
|
||||||
reposReq.SetDomain(a.domain)
|
|
||||||
reposReq.RepoNamespace = namespace
|
|
||||||
var page = 1
|
|
||||||
for {
|
|
||||||
reposReq.Page = requests.NewInteger(page)
|
|
||||||
reposResp, err = c.GetRepoListByNamespace(reposReq)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
var resp = &aliReposResp{}
|
|
||||||
err = json.Unmarshal(reposResp.GetHttpContentBytes(), resp)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
repos = append(repos, resp.Data.Repos...)
|
|
||||||
|
|
||||||
if resp.Data.Total-(resp.Data.Page*resp.Data.PageSize) <= 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
page++
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *adapter) getTags(repo aliRepo, c *cr.Client) (tags []string, err error) {
|
|
||||||
log.Debugf("[ali-acr.getTags]%s: %#v\n", a.domain, repo)
|
|
||||||
var tagsReq = cr.CreateGetRepoTagsRequest()
|
|
||||||
var tagsResp = cr.CreateGetRepoTagsResponse()
|
|
||||||
tagsReq.SetDomain(a.domain)
|
|
||||||
tagsReq.RepoNamespace = repo.RepoNamespace
|
|
||||||
tagsReq.RepoName = repo.RepoName
|
|
||||||
var page = 1
|
|
||||||
for {
|
|
||||||
tagsReq.Page = requests.NewInteger(page)
|
|
||||||
tagsResp, err = c.GetRepoTags(tagsReq)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var resp = &aliTagResp{}
|
|
||||||
err = json.Unmarshal(tagsResp.GetHttpContentBytes(), resp)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for _, tag := range resp.Data.Tags {
|
|
||||||
tags = append(tags, tag.Tag)
|
|
||||||
}
|
|
||||||
|
|
||||||
if resp.Data.Total-(resp.Data.Page*resp.Data.PageSize) <= 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
page++
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
@@ -62,8 +62,6 @@ func getMockAdapter(t *testing.T, hasCred, health bool) (*adapter, *httptest.Ser
 }
 return &adapter{
 Adapter: native.NewAdapter(registry),
-region: "test-region",
-domain: server.URL,
 registry: registry,
 }, server
 }
@@ -79,46 +77,91 @@ func TestAdapter_Info(t *testing.T) {
 assert.EqualValues(t, model.ResourceTypeImage, info.SupportedResourceTypes[0])
 }

-func Test_getRegion(t *testing.T) {
+func Test_getRegistryURL(t *testing.T) {
 tests := []struct {
 name string
 url string
-wantRegion string
+want string
 wantErr bool
 }{
-{"registry shanghai", "https://registry.cn-shanghai.aliyuncs.com", "cn-shanghai", false},
-{"invalid registry shanghai", "http://registry.cn-shanghai.aliyuncs.com", "", true},
-{"registry hangzhou", "https://registry.cn-hangzhou.aliyuncs.com", "cn-hangzhou", false},
-{"registry hangzhou vpc", "https://registry-vpc.cn-hangzhou.aliyuncs.com", "cn-hangzhou", false},
-{"registry hangzhou internal", "https://registry-internal.cn-hangzhou.aliyuncs.com", "cn-hangzhou", false},
-{"cr shanghai", "https://cr.cn-shanghai.aliyuncs.com", "cn-shanghai", false},
-{"cr hangzhou", "https://cr.cn-hangzhou.aliyuncs.com", "cn-hangzhou", false},
-{"invalid cr url", "https://acr.cn-hangzhou.aliyuncs.com", "", true},
-{"invalid registry url", "https://registry.cn-hangzhou.ali.com", "", true},
+{
+"empty url",
+"",
+"",
+true,
+},
+{
+"just return url",
+"https://cr.cn-hangzhou.aliyun.com",
+"https://cr.cn-hangzhou.aliyun.com",
+false,
+},
+{
+"change match url",
+"https://cr.cn-hangzhou.aliyuncs.com",
+"https://registry.cn-hangzhou.aliyuncs.com",
+false,
+},
 }
 for _, tt := range tests {
 t.Run(tt.name, func(t *testing.T) {
-gotRegion, err := getRegion(tt.url)
+got, err := getRegistryURL(tt.url)
 if tt.wantErr {
 assert.NotNil(t, err)
 }
-assert.Equal(t, tt.wantRegion, gotRegion)
+assert.Equal(t, tt.want, got)
 })
 }
 }

-var urlForBenchmark = []string{
-"https://cr.cn-hangzhou.aliyuncs.com",
-"https://registry.cn-shanghai.aliyuncs.com",
-"https://registry-vpc.cn-shanghai.aliyuncs.com",
-"https://registry-internal.cn-shanghai.aliyuncs.com",
-}
-
-func BenchmarkGetRegion(b *testing.B) {
-for i := 0; i < b.N; i++ {
-for _, url := range urlForBenchmark {
-getRegion(url)
-}
+func Test_parseRegistryService(t *testing.T) {
+tests := []struct {
+name string
+service string
+wantInfo *registryServiceInfo
+wantErr bool
+}{
+{
+"not acr Service",
+"otherregistry.cn-hangzhou:china",
+nil,
+true,
+},
+{
+"empty Service",
+"",
+nil,
+true,
+},
+{
+"acr ee service",
+"registry.aliyuncs.com:cn-hangzhou:china:cri-xxxxxxxxx",
+&registryServiceInfo{
+IsACREE: true,
+RegionID: "cn-hangzhou",
+InstanceID: "cri-xxxxxxxxx",
+},
+false,
+},
+{
+"acr service",
+"registry.aliyuncs.com:cn-hangzhou:26842",
+&registryServiceInfo{
+IsACREE: false,
+RegionID: "cn-hangzhou",
+InstanceID: "",
+},
+false,
+},
+}
+for _, tt := range tests {
+t.Run(tt.name, func(t *testing.T) {
+info, err := parseRegistryService(tt.service)
+if tt.wantErr {
+assert.NotNil(t, err)
+}
+assert.Equal(t, tt.wantInfo, info)
+})
 }
 }

@@ -132,9 +175,6 @@ func Test_adapter_FetchArtifacts(t *testing.T) {
 }
 func Test_aliyunAuthCredential_isCacheTokenValid(t *testing.T) {
 type fields struct {
-region string
-accessKey string
-secretKey string
 cacheToken *registryTemporaryToken
 cacheTokenExpiredAt time.Time
 }

@@ -145,23 +185,22 @@ func Test_aliyunAuthCredential_isCacheTokenValid(t *testing.T) {
 fields fields
 want bool
 }{
-{"nil cacheTokenExpiredAt", fields{"test-region", "MockAccessKey", "MockSecretKey", nil, nilTime}, false},
-{"nil cacheToken", fields{"test-region", "MockAccessKey", "MockSecretKey", nil, time.Time{}}, false},
-{"expired", fields{"test-region", "MockAccessKey", "MockSecretKey", &registryTemporaryToken{}, time.Now().AddDate(0, 0, -1)}, false},
-{"ok", fields{"test-region", "MockAccessKey", "MockSecretKey", &registryTemporaryToken{}, time.Now().AddDate(0, 0, 1)}, true},
+{"nil cacheTokenExpiredAt", fields{nil, nilTime}, false},
+{"nil cacheToken", fields{nil, time.Time{}}, false},
+{"expired", fields{&registryTemporaryToken{}, time.Now().AddDate(0, 0, -1)}, false},
+{"ok", fields{&registryTemporaryToken{}, time.Now().AddDate(0, 0, 1)}, true},
 }
 for _, tt := range tests {
 t.Run(tt.name, func(t *testing.T) {
 a := &aliyunAuthCredential{
-region: tt.fields.region,
-accessKey: tt.fields.accessKey,
-secretKey: tt.fields.secretKey,
 cacheToken: tt.fields.cacheToken,
 cacheTokenExpiredAt: tt.fields.cacheTokenExpiredAt,
 }
 if got := a.isCacheTokenValid(); got != tt.want {
+fmt.Println(got)
 assert.Equal(t, got, tt.want)
 }
 })
 }

 }

@@ -15,13 +15,10 @@
 package aliacr

 import (
-"encoding/json"
-"fmt"
+"errors"
 "net/http"
 "time"

-"github.com/aliyun/alibaba-cloud-sdk-go/services/cr"
-
 "github.com/goharbor/harbor/src/common/http/modifier"
 "github.com/goharbor/harbor/src/lib/log"
 )

@@ -31,9 +28,7 @@ type Credential modifier.Modifier

 // Implements interface Credential
 type aliyunAuthCredential struct {
-region string
-accessKey string
-secretKey string
+acrAPI openapi
 cacheToken *registryTemporaryToken
 cacheTokenExpiredAt time.Time
 }

@@ -46,11 +41,9 @@ type registryTemporaryToken struct {
 var _ Credential = &aliyunAuthCredential{}

 // NewAuth will get a temporary docker registry username and password via aliyun cr service API.
-func NewAuth(region, accessKey, secretKey string) Credential {
+func NewAuth(acrAPI openapi) Credential {
 return &aliyunAuthCredential{
-region: region,
-accessKey: accessKey,
-secretKey: secretKey,
+acrAPI: acrAPI,
 cacheToken: &registryTemporaryToken{},
 }
 }

@@ -58,27 +51,16 @@ func NewAuth(region, accessKey, secretKey string) Credential {
 func (a *aliyunAuthCredential) Modify(r *http.Request) (err error) {
 if !a.isCacheTokenValid() {
 log.Debugf("[aliyunAuthCredential.Modify.updateToken]Host: %s\n", r.Host)
-var client *cr.Client
-client, err = cr.NewClientWithAccessKey(a.region, a.accessKey, a.secretKey)
-if err != nil {
-return
+if a.acrAPI == nil {
+return errors.New("acr api is nil")
 }
-
-var tokenRequest = cr.CreateGetAuthorizationTokenRequest()
-var tokenResponse *cr.GetAuthorizationTokenResponse
-tokenRequest.SetDomain(fmt.Sprintf(endpointTpl, a.region))
-tokenResponse, err = client.GetAuthorizationToken(tokenRequest)
+v, err := a.acrAPI.GetAuthorizationToken()
 if err != nil {
-return
+return err
 }
-var v authorizationToken
-err = json.Unmarshal(tokenResponse.GetHttpContentBytes(), &v)
-if err != nil {
-return
-}
-a.cacheTokenExpiredAt = v.Data.ExpireDate.ToTime()
-a.cacheToken.user = v.Data.TempUserName
-a.cacheToken.password = v.Data.AuthorizationToken
+a.cacheTokenExpiredAt = v.expiresAt
+a.cacheToken.user = v.user
+a.cacheToken.password = v.password
 } else {
 log.Debug("[aliyunAuthCredential] USE CACHE TOKEN!!!")
 }
Some files were not shown because too many files have changed in this diff.