mirror of
https://github.com/goharbor/harbor.git
synced 2024-11-24 19:25:19 +01:00
Merge branch 'main' into reduce-image-size
This commit is contained in:
commit
0c0ca76128
20
.github/workflows/CI.yml
vendored
20
.github/workflows/CI.yml
vendored
@ -41,10 +41,10 @@ jobs:
|
||||
- ubuntu-latest
|
||||
timeout-minutes: 100
|
||||
steps:
|
||||
- name: Set up Go 1.22
|
||||
- name: Set up Go 1.23
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: 1.22.3
|
||||
go-version: 1.23.2
|
||||
id: go
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
@ -102,10 +102,10 @@ jobs:
|
||||
- ubuntu-latest
|
||||
timeout-minutes: 100
|
||||
steps:
|
||||
- name: Set up Go 1.22
|
||||
- name: Set up Go 1.23
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: 1.22.3
|
||||
go-version: 1.23.2
|
||||
id: go
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
@ -157,10 +157,10 @@ jobs:
|
||||
- ubuntu-latest
|
||||
timeout-minutes: 100
|
||||
steps:
|
||||
- name: Set up Go 1.22
|
||||
- name: Set up Go 1.23
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: 1.22.3
|
||||
go-version: 1.23.2
|
||||
id: go
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
@ -212,10 +212,10 @@ jobs:
|
||||
- ubuntu-latest
|
||||
timeout-minutes: 100
|
||||
steps:
|
||||
- name: Set up Go 1.22
|
||||
- name: Set up Go 1.23
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: 1.22.3
|
||||
go-version: 1.23.2
|
||||
id: go
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
@ -265,10 +265,10 @@ jobs:
|
||||
- ubuntu-latest
|
||||
timeout-minutes: 100
|
||||
steps:
|
||||
- name: Set up Go 1.22
|
||||
- name: Set up Go 1.23
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: 1.22.3
|
||||
go-version: 1.23.2
|
||||
id: go
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
|
2
.github/workflows/build-package.yml
vendored
2
.github/workflows/build-package.yml
vendored
@ -26,7 +26,7 @@ jobs:
|
||||
- name: Set up Go 1.22
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: 1.22.3
|
||||
go-version: 1.23.2
|
||||
id: go
|
||||
- name: Setup Docker
|
||||
uses: docker-practice/actions-setup-docker@master
|
||||
|
2
.github/workflows/conformance_test.yml
vendored
2
.github/workflows/conformance_test.yml
vendored
@ -28,7 +28,7 @@ jobs:
|
||||
- name: Set up Go 1.21
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: 1.22.3
|
||||
go-version: 1.23.2
|
||||
id: go
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
|
2
.github/workflows/publish_release.yml
vendored
2
.github/workflows/publish_release.yml
vendored
@ -60,7 +60,7 @@ jobs:
|
||||
docker load -i ./harbor/harbor.${{ env.BASE_TAG }}.tar.gz
|
||||
images="$(docker images --format "{{.Repository}}" --filter=reference='goharbor/*:${{ env.BASE_TAG }}' | xargs)"
|
||||
source tools/release/release_utils.sh
|
||||
publishImages ${{ env.CUR_TAG }} ${{ env.BASE_TAG }} ${{ secrets.DOCKER_HUB_USERNAME }} ${{ secrets.DOCKER_HUB_PASSWORD }} $images
|
||||
publishImages ${{ env.CUR_TAG }} ${{ env.BASE_TAG }} "${{ secrets.DOCKER_HUB_USERNAME }}" "${{ secrets.DOCKER_HUB_PASSWORD }}" $images
|
||||
publishPackages ${{ env.CUR_TAG }} ${{ env.BASE_TAG }} ${{ github.actor }} ${{ secrets.GITHUB_TOKEN }} $images
|
||||
- name: Generate release notes
|
||||
run: |
|
||||
|
@ -142,30 +142,31 @@ The folder graph below shows the structure of the source code folder `harbor/src
|
||||
#### Go
|
||||
Harbor backend is written in [Go](http://golang.org/). If you don't have a Harbor backend service development environment, please [set one up](https://golang.org/doc/install).
|
||||
|
||||
| Harbor | Requires Go |
|
||||
|----------|---------------|
|
||||
| 1.1 | 1.7.3 |
|
||||
| 1.2 | 1.7.3 |
|
||||
| 1.3 | 1.9.2 |
|
||||
| 1.4 | 1.9.2 |
|
||||
| 1.5 | 1.9.2 |
|
||||
| 1.6 | 1.9.2 |
|
||||
| 1.7 | 1.9.2 |
|
||||
| 1.8 | 1.11.2 |
|
||||
| 1.9 | 1.12.12 |
|
||||
| 1.10 | 1.12.12 |
|
||||
| 2.0 | 1.13.15 |
|
||||
| 2.1 | 1.14.13 |
|
||||
| 2.2 | 1.15.6 |
|
||||
| 2.3 | 1.15.12 |
|
||||
| 2.4 | 1.17.7 |
|
||||
| 2.5 | 1.17.7 |
|
||||
| 2.6 | 1.18.6 |
|
||||
| 2.7 | 1.19.4 |
|
||||
| 2.8 | 1.20.6 |
|
||||
| 2.9 | 1.21.3 |
|
||||
| 2.10 | 1.21.8 |
|
||||
| 2.11 | 1.22.3 |
|
||||
| Harbor | Requires Go |
|
||||
|--------|-------------|
|
||||
| 1.1 | 1.7.3 |
|
||||
| 1.2 | 1.7.3 |
|
||||
| 1.3 | 1.9.2 |
|
||||
| 1.4 | 1.9.2 |
|
||||
| 1.5 | 1.9.2 |
|
||||
| 1.6 | 1.9.2 |
|
||||
| 1.7 | 1.9.2 |
|
||||
| 1.8 | 1.11.2 |
|
||||
| 1.9 | 1.12.12 |
|
||||
| 1.10 | 1.12.12 |
|
||||
| 2.0 | 1.13.15 |
|
||||
| 2.1 | 1.14.13 |
|
||||
| 2.2 | 1.15.6 |
|
||||
| 2.3 | 1.15.12 |
|
||||
| 2.4 | 1.17.7 |
|
||||
| 2.5 | 1.17.7 |
|
||||
| 2.6 | 1.18.6 |
|
||||
| 2.7 | 1.19.4 |
|
||||
| 2.8 | 1.20.6 |
|
||||
| 2.9 | 1.21.3 |
|
||||
| 2.10 | 1.21.8 |
|
||||
| 2.11 | 1.22.3 |
|
||||
| 2.12 | 1.23.2 |
|
||||
|
||||
|
||||
Ensure your GOPATH and PATH have been configured in accordance with the Go environment instructions.
|
||||
@ -276,7 +277,7 @@ To build the code, please refer to [build](https://goharbor.io/docs/edge/build-c
|
||||
|
||||
**Note**: from v2.0, Harbor uses [go-swagger](https://github.com/go-swagger/go-swagger) to generate API server from Swagger 2.0 (aka [OpenAPI 2.0](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md)). To add or change the APIs, first update the `api/v2.0/swagger.yaml` file, then run `make gen_apis` to generate the API server, finally, implement or update the API handlers in `src/server/v2.0/handler` package.
|
||||
|
||||
As now Harbor uses `controller/manager/dao` programming model, we suggest to use [testify mock](https://github.com/stretchr/testify/blob/master/mock/doc.go) to test `controller` and `manager`. Harbor integrates [mockery](https://github.com/vektra/mockery) to generate mocks for golang interfaces using the testify mock package. To generate mocks for the interface, first add `//go:generate mockery xxx` comment with mockery command in the subpackages of `src/testing`, then run `make gen_mocks` to generate mocks.
|
||||
As now Harbor uses `controller/manager/dao` programming model, we suggest to use [testify mock](https://github.com/stretchr/testify/blob/master/mock/doc.go) to test `controller` and `manager`. Harbor integrates [mockery](https://github.com/vektra/mockery) to generate mocks for golang interfaces using the testify mock package. To generate mocks for the interface, first add mock config in the `src/.mockery.yaml`, then run `make gen_mocks` to generate mocks.
|
||||
|
||||
### Keep sync with upstream
|
||||
|
||||
|
23
Makefile
23
Makefile
@ -104,16 +104,18 @@ PREPARE_VERSION_NAME=versions
|
||||
|
||||
#versions
|
||||
REGISTRYVERSION=v2.8.3-patch-redis
|
||||
TRIVYVERSION=v0.51.2
|
||||
TRIVYADAPTERVERSION=v0.31.2
|
||||
TRIVYVERSION=v0.56.1
|
||||
TRIVYADAPTERVERSION=v0.32.0-rc.1
|
||||
|
||||
# version of registry for pulling the source code
|
||||
REGISTRY_SRC_TAG=v2.8.3
|
||||
# source of upstream distribution code
|
||||
DISTRIBUTION_SRC=https://github.com/distribution/distribution.git
|
||||
|
||||
# dependency binaries
|
||||
REGISTRYURL=https://storage.googleapis.com/harbor-builds/bin/registry/release-${REGISTRYVERSION}/registry
|
||||
TRIVY_DOWNLOAD_URL=https://github.com/aquasecurity/trivy/releases/download/$(TRIVYVERSION)/trivy_$(TRIVYVERSION:v%=%)_Linux-64bit.tar.gz
|
||||
TRIVY_ADAPTER_DOWNLOAD_URL=https://github.com/aquasecurity/harbor-scanner-trivy/releases/download/$(TRIVYADAPTERVERSION)/harbor-scanner-trivy_$(TRIVYADAPTERVERSION:v%=%)_Linux_x86_64.tar.gz
|
||||
TRIVY_ADAPTER_DOWNLOAD_URL=https://github.com/goharbor/harbor-scanner-trivy/archive/refs/tags/$(TRIVYADAPTERVERSION).tar.gz
|
||||
|
||||
define VERSIONS_FOR_PREPARE
|
||||
VERSION_TAG: $(VERSIONTAG)
|
||||
@ -140,7 +142,7 @@ GOINSTALL=$(GOCMD) install
|
||||
GOTEST=$(GOCMD) test
|
||||
GODEP=$(GOTEST) -i
|
||||
GOFMT=gofmt -w
|
||||
GOBUILDIMAGE=golang:1.22.3
|
||||
GOBUILDIMAGE=golang:1.23.2
|
||||
GOBUILDPATHINCONTAINER=/harbor
|
||||
|
||||
# go build
|
||||
@ -163,7 +165,6 @@ GOIMAGEBUILD_CORE=$(GOIMAGEBUILDCMD) $(GOFLAGS) ${GOTAGS} --ldflags "-w -s $(COR
|
||||
GOBUILDPATH_CORE=$(GOBUILDPATHINCONTAINER)/src/core
|
||||
GOBUILDPATH_JOBSERVICE=$(GOBUILDPATHINCONTAINER)/src/jobservice
|
||||
GOBUILDPATH_REGISTRYCTL=$(GOBUILDPATHINCONTAINER)/src/registryctl
|
||||
GOBUILDPATH_MIGRATEPATCH=$(GOBUILDPATHINCONTAINER)/src/cmd/migrate-patch
|
||||
GOBUILDPATH_STANDALONE_DB_MIGRATOR=$(GOBUILDPATHINCONTAINER)/src/cmd/standalone-db-migrator
|
||||
GOBUILDPATH_EXPORTER=$(GOBUILDPATHINCONTAINER)/src/cmd/exporter
|
||||
GOBUILDMAKEPATH=make
|
||||
@ -180,7 +181,6 @@ JOBSERVICEBINARYPATH=$(BUILDPATH)/$(GOBUILDMAKEPATH_JOBSERVICE)
|
||||
JOBSERVICEBINARYNAME=harbor_jobservice
|
||||
REGISTRYCTLBINARYPATH=$(BUILDPATH)/$(GOBUILDMAKEPATH_REGISTRYCTL)
|
||||
REGISTRYCTLBINARYNAME=harbor_registryctl
|
||||
MIGRATEPATCHBINARYNAME=migrate-patch
|
||||
STANDALONE_DB_MIGRATOR_BINARYPATH=$(BUILDPATH)/$(GOBUILDMAKEPATH_STANDALONE_DB_MIGRATOR)
|
||||
STANDALONE_DB_MIGRATOR_BINARYNAME=migrate
|
||||
|
||||
@ -282,7 +282,7 @@ endef
|
||||
|
||||
# lint swagger doc
|
||||
SPECTRAL_IMAGENAME=$(IMAGENAMESPACE)/spectral
|
||||
SPECTRAL_VERSION=v6.1.0
|
||||
SPECTRAL_VERSION=v6.11.1
|
||||
SPECTRAL_IMAGE_BUILD_CMD=${DOCKERBUILD} -f ${TOOLSPATH}/spectral/Dockerfile --build-arg GOLANG=${GOBUILDIMAGE} --build-arg SPECTRAL_VERSION=${SPECTRAL_VERSION} -t ${SPECTRAL_IMAGENAME}:$(SPECTRAL_VERSION) .
|
||||
SPECTRAL=$(RUNCONTAINER) $(SPECTRAL_IMAGENAME):$(SPECTRAL_VERSION)
|
||||
|
||||
@ -312,13 +312,13 @@ gen_apis: lint_apis
|
||||
|
||||
|
||||
MOCKERY_IMAGENAME=$(IMAGENAMESPACE)/mockery
|
||||
MOCKERY_VERSION=v2.42.2
|
||||
MOCKERY=$(RUNCONTAINER) ${MOCKERY_IMAGENAME}:${MOCKERY_VERSION}
|
||||
MOCKERY_VERSION=v2.46.2
|
||||
MOCKERY=$(RUNCONTAINER)/src ${MOCKERY_IMAGENAME}:${MOCKERY_VERSION}
|
||||
MOCKERY_IMAGE_BUILD_CMD=${DOCKERBUILD} -f ${TOOLSPATH}/mockery/Dockerfile --build-arg GOLANG=${GOBUILDIMAGE} --build-arg MOCKERY_VERSION=${MOCKERY_VERSION} -t ${MOCKERY_IMAGENAME}:$(MOCKERY_VERSION) .
|
||||
|
||||
gen_mocks:
|
||||
$(call prepare_docker_image,${MOCKERY_IMAGENAME},${MOCKERY_VERSION},${MOCKERY_IMAGE_BUILD_CMD})
|
||||
${MOCKERY} go generate ./...
|
||||
${MOCKERY} mockery
|
||||
|
||||
mocks_check: gen_mocks
|
||||
@echo checking mocks...
|
||||
@ -388,7 +388,7 @@ build:
|
||||
exit 1; \
|
||||
fi
|
||||
make -f $(MAKEFILEPATH_PHOTON)/Makefile $(BUILDTARGET) -e DEVFLAG=$(DEVFLAG) -e GOBUILDIMAGE=$(GOBUILDIMAGE) \
|
||||
-e REGISTRYVERSION=$(REGISTRYVERSION) -e REGISTRY_SRC_TAG=$(REGISTRY_SRC_TAG) \
|
||||
-e REGISTRYVERSION=$(REGISTRYVERSION) -e REGISTRY_SRC_TAG=$(REGISTRY_SRC_TAG) -e DISTRIBUTION_SRC=$(DISTRIBUTION_SRC)\
|
||||
-e TRIVYVERSION=$(TRIVYVERSION) -e TRIVYADAPTERVERSION=$(TRIVYADAPTERVERSION) \
|
||||
-e VERSIONTAG=$(VERSIONTAG) \
|
||||
-e BUILDBIN=$(BUILDBIN) \
|
||||
@ -546,7 +546,6 @@ cleanbinary:
|
||||
if [ -f $(CORE_BINARYPATH)/$(CORE_BINARYNAME) ] ; then rm $(CORE_BINARYPATH)/$(CORE_BINARYNAME) ; fi
|
||||
if [ -f $(JOBSERVICEBINARYPATH)/$(JOBSERVICEBINARYNAME) ] ; then rm $(JOBSERVICEBINARYPATH)/$(JOBSERVICEBINARYNAME) ; fi
|
||||
if [ -f $(REGISTRYCTLBINARYPATH)/$(REGISTRYCTLBINARYNAME) ] ; then rm $(REGISTRYCTLBINARYPATH)/$(REGISTRYCTLBINARYNAME) ; fi
|
||||
if [ -f $(MIGRATEPATCHBINARYPATH)/$(MIGRATEPATCHBINARYNAME) ] ; then rm $(MIGRATEPATCHBINARYPATH)/$(MIGRATEPATCHBINARYNAME) ; fi
|
||||
rm -rf make/photon/*/binary/
|
||||
|
||||
cleanbaseimage:
|
||||
|
@ -109,7 +109,7 @@ paths:
|
||||
operationId: searchLdapUser
|
||||
summary: Search available ldap users.
|
||||
description: |
|
||||
This endpoint searches the available ldap users based on related configuration parameters. Support searched by input ladp configuration, load configuration from the system and specific filter.
|
||||
This endpoint searches the available ldap users based on related configuration parameters. Support searched by input ldap configuration, load configuration from the system and specific filter.
|
||||
parameters:
|
||||
- $ref: '#/parameters/requestId'
|
||||
- name: username
|
||||
@ -1548,6 +1548,88 @@ paths:
|
||||
$ref: '#/responses/409'
|
||||
'500':
|
||||
$ref: '#/responses/500'
|
||||
/projects/{project_name_or_id}/artifacts:
|
||||
get:
|
||||
summary: List artifacts
|
||||
description: List artifacts of the specified project
|
||||
tags:
|
||||
- project
|
||||
operationId: listArtifactsOfProject
|
||||
parameters:
|
||||
- $ref: '#/parameters/requestId'
|
||||
- $ref: '#/parameters/isResourceName'
|
||||
- $ref: '#/parameters/projectNameOrId'
|
||||
- $ref: '#/parameters/query'
|
||||
- $ref: '#/parameters/sort'
|
||||
- $ref: '#/parameters/page'
|
||||
- $ref: '#/parameters/pageSize'
|
||||
- $ref: '#/parameters/acceptVulnerabilities'
|
||||
- name: with_tag
|
||||
in: query
|
||||
description: Specify whether the tags are included inside the returning artifacts
|
||||
type: boolean
|
||||
required: false
|
||||
default: true
|
||||
- name: with_label
|
||||
in: query
|
||||
description: Specify whether the labels are included inside the returning artifacts
|
||||
type: boolean
|
||||
required: false
|
||||
default: false
|
||||
- name: with_scan_overview
|
||||
in: query
|
||||
description: Specify whether the scan overview is included inside the returning artifacts
|
||||
type: boolean
|
||||
required: false
|
||||
default: false
|
||||
- name: with_sbom_overview
|
||||
in: query
|
||||
description: Specify whether the SBOM overview is included in returning artifacts, when this option is true, the SBOM overview will be included in the response
|
||||
type: boolean
|
||||
required: false
|
||||
default: false
|
||||
- name: with_immutable_status
|
||||
in: query
|
||||
description: Specify whether the immutable status is included inside the tags of the returning artifacts. Only works when setting "with_immutable_status=true"
|
||||
type: boolean
|
||||
required: false
|
||||
default: false
|
||||
- name: with_accessory
|
||||
in: query
|
||||
description: Specify whether the accessories are included of the returning artifacts. Only works when setting "with_accessory=true"
|
||||
type: boolean
|
||||
required: false
|
||||
default: false
|
||||
- name: latest_in_repository
|
||||
in: query
|
||||
description: Specify whether only the latest pushed artifact of each repository is included inside the returning artifacts. Only works when either artifact_type or media_type is included in the query.
|
||||
type: boolean
|
||||
required: false
|
||||
default: false
|
||||
responses:
|
||||
'200':
|
||||
description: Success
|
||||
headers:
|
||||
X-Total-Count:
|
||||
description: The total count of artifacts
|
||||
type: integer
|
||||
Link:
|
||||
description: Link refers to the previous page and next page
|
||||
type: string
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/definitions/Artifact'
|
||||
'400':
|
||||
$ref: '#/responses/400'
|
||||
'401':
|
||||
$ref: '#/responses/401'
|
||||
'403':
|
||||
$ref: '#/responses/403'
|
||||
'404':
|
||||
$ref: '#/responses/404'
|
||||
'500':
|
||||
$ref: '#/responses/500'
|
||||
'/projects/{project_name_or_id}/scanner':
|
||||
get:
|
||||
summary: Get project level scanner
|
||||
@ -6586,6 +6668,9 @@ definitions:
|
||||
manifest_media_type:
|
||||
type: string
|
||||
description: The manifest media type of the artifact
|
||||
artifact_type:
|
||||
type: string
|
||||
description: The artifact_type in the manifest of the artifact
|
||||
project_id:
|
||||
type: integer
|
||||
format: int64
|
||||
@ -6594,6 +6679,9 @@ definitions:
|
||||
type: integer
|
||||
format: int64
|
||||
description: The ID of the repository that the artifact belongs to
|
||||
repository_name:
|
||||
type: string
|
||||
description: The name of the repository that the artifact belongs to
|
||||
digest:
|
||||
type: string
|
||||
description: The digest of the artifact
|
||||
@ -7007,6 +7095,9 @@ definitions:
|
||||
type: boolean
|
||||
description: Whether the preheat policy enabled
|
||||
x-omitempty: false
|
||||
scope:
|
||||
type: string
|
||||
description: The scope of preheat policy
|
||||
creation_time:
|
||||
type: string
|
||||
format: date-time
|
||||
@ -7252,6 +7343,10 @@ definitions:
|
||||
type: string
|
||||
description: 'The ID of the tag retention policy for the project'
|
||||
x-nullable: true
|
||||
proxy_speed_kb:
|
||||
type: string
|
||||
description: 'The bandwidth limit of proxy cache, in Kbps (kilobits per second). It limits the communication between Harbor and the upstream registry, not the client and the Harbor.'
|
||||
x-nullable: true
|
||||
ProjectSummary:
|
||||
type: object
|
||||
properties:
|
||||
@ -7754,6 +7849,12 @@ definitions:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/definitions/RobotPermission'
|
||||
creator_type:
|
||||
type: string
|
||||
description: The type of the robot creator, like local(harbor_user) or robot.
|
||||
creator_ref:
|
||||
type: integer
|
||||
description: The reference of the robot creator, like the id of harbor user.
|
||||
creation_time:
|
||||
type: string
|
||||
format: date-time
|
||||
@ -8897,6 +8998,9 @@ definitions:
|
||||
ldap_group_search_scope:
|
||||
$ref: '#/definitions/IntegerConfigItem'
|
||||
description: The scope to search ldap group. ''0-LDAP_SCOPE_BASE, 1-LDAP_SCOPE_ONELEVEL, 2-LDAP_SCOPE_SUBTREE''
|
||||
ldap_group_attach_parallel:
|
||||
$ref: '#/definitions/BoolConfigItem'
|
||||
description: Attach LDAP user group information in parallel.
|
||||
ldap_scope:
|
||||
$ref: '#/definitions/IntegerConfigItem'
|
||||
description: The scope to search ldap users,'0-LDAP_SCOPE_BASE, 1-LDAP_SCOPE_ONELEVEL, 2-LDAP_SCOPE_SUBTREE'
|
||||
@ -9087,6 +9191,11 @@ definitions:
|
||||
description: The scope to search ldap group. ''0-LDAP_SCOPE_BASE, 1-LDAP_SCOPE_ONELEVEL, 2-LDAP_SCOPE_SUBTREE''
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
ldap_group_attach_parallel:
|
||||
type: boolean
|
||||
description: Attach LDAP user group information in parallel, the parallel worker count is 5
|
||||
x-omitempty: true
|
||||
x-isnullable: true
|
||||
ldap_scope:
|
||||
type: integer
|
||||
description: The scope to search ldap users,'0-LDAP_SCOPE_BASE, 1-LDAP_SCOPE_ONELEVEL, 2-LDAP_SCOPE_SUBTREE'
|
||||
|
@ -48,7 +48,7 @@ harbor_admin_password: Harbor12345
|
||||
|
||||
# Harbor DB configuration
|
||||
database:
|
||||
# The password for the root user of Harbor DB. Change this before any production use.
|
||||
# The password for the user('postgres' by default) of Harbor DB. Change this before any production use.
|
||||
password: root123
|
||||
# The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
|
||||
max_idle_conns: 100
|
||||
@ -174,7 +174,7 @@ log:
|
||||
# port: 5140
|
||||
|
||||
#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
|
||||
_version: 2.11.0
|
||||
_version: 2.12.0
|
||||
|
||||
# Uncomment external_database if using external database.
|
||||
# external_database:
|
||||
|
7
make/migrations/postgresql/0150_2.12.0_schema.up.sql
Normal file
7
make/migrations/postgresql/0150_2.12.0_schema.up.sql
Normal file
@ -0,0 +1,7 @@
|
||||
/*
|
||||
Add new column creator_ref and creator_type for robot table to record the creator information of the robot
|
||||
*/
|
||||
ALTER TABLE robot ADD COLUMN IF NOT EXISTS creator_ref integer default 0;
|
||||
ALTER TABLE robot ADD COLUMN IF NOT EXISTS creator_type varchar(255);
|
||||
|
||||
ALTER TABLE p2p_preheat_policy ADD COLUMN IF NOT EXISTS scope varchar(255);
|
@ -178,7 +178,7 @@ _build_registry:
|
||||
rm -rf $(DOCKERFILEPATH_REG)/binary && mkdir -p $(DOCKERFILEPATH_REG)/binary && \
|
||||
$(call _get_binary, $(REGISTRYURL), $(DOCKERFILEPATH_REG)/binary/registry); \
|
||||
else \
|
||||
cd $(DOCKERFILEPATH_REG) && $(DOCKERFILEPATH_REG)/builder $(REGISTRY_SRC_TAG) && cd - ; \
|
||||
cd $(DOCKERFILEPATH_REG) && $(DOCKERFILEPATH_REG)/builder $(REGISTRY_SRC_TAG) $(DISTRIBUTION_SRC) && cd - ; \
|
||||
fi
|
||||
@echo "building registry container for photon..."
|
||||
@chmod 655 $(DOCKERFILEPATH_REG)/binary/registry && $(DOCKERBUILD_WITH_PULL_PARA) --build-arg harbor_base_image_version=$(BASEIMAGETAG) --build-arg harbor_base_namespace=$(BASEIMAGENAMESPACE) -f $(DOCKERFILEPATH_REG)/$(DOCKERFILENAME_REG) -t $(DOCKERIMAGENAME_REG):$(VERSIONTAG) .
|
||||
|
@ -10,7 +10,7 @@ from migrations import accept_versions
|
||||
@click.command()
|
||||
@click.option('-i', '--input', 'input_', required=True, help="The path of original config file")
|
||||
@click.option('-o', '--output', default='', help="the path of output config file")
|
||||
@click.option('-t', '--target', default='2.11.0', help="target version of input path")
|
||||
@click.option('-t', '--target', default='2.12.0', help="target version of input path")
|
||||
def migrate(input_, output, target):
|
||||
"""
|
||||
migrate command will migrate config file style to specific version
|
||||
|
@ -2,4 +2,4 @@ import os
|
||||
|
||||
MIGRATION_BASE_DIR = os.path.dirname(__file__)
|
||||
|
||||
accept_versions = {'1.9.0', '1.10.0', '2.0.0', '2.1.0', '2.2.0', '2.3.0', '2.4.0', '2.5.0', '2.6.0', '2.7.0', '2.8.0', '2.9.0','2.10.0', '2.11.0'}
|
||||
accept_versions = {'1.9.0', '1.10.0', '2.0.0', '2.1.0', '2.2.0', '2.3.0', '2.4.0', '2.5.0', '2.6.0', '2.7.0', '2.8.0', '2.9.0','2.10.0', '2.11.0', '2.12.0'}
|
21
make/photon/prepare/migrations/version_2_12_0/__init__.py
Normal file
21
make/photon/prepare/migrations/version_2_12_0/__init__.py
Normal file
@ -0,0 +1,21 @@
|
||||
import os
|
||||
from jinja2 import Environment, FileSystemLoader, StrictUndefined, select_autoescape
|
||||
from utils.migration import read_conf
|
||||
|
||||
revision = '2.12.0'
|
||||
down_revisions = ['2.11.0']
|
||||
|
||||
def migrate(input_cfg, output_cfg):
|
||||
current_dir = os.path.dirname(__file__)
|
||||
tpl = Environment(
|
||||
loader=FileSystemLoader(current_dir),
|
||||
undefined=StrictUndefined,
|
||||
trim_blocks=True,
|
||||
lstrip_blocks=True,
|
||||
autoescape = select_autoescape()
|
||||
).get_template('harbor.yml.jinja')
|
||||
|
||||
config_dict = read_conf(input_cfg)
|
||||
|
||||
with open(output_cfg, 'w') as f:
|
||||
f.write(tpl.render(**config_dict))
|
737
make/photon/prepare/migrations/version_2_12_0/harbor.yml.jinja
Normal file
737
make/photon/prepare/migrations/version_2_12_0/harbor.yml.jinja
Normal file
@ -0,0 +1,737 @@
|
||||
# Configuration file of Harbor
|
||||
|
||||
# The IP address or hostname to access admin UI and registry service.
|
||||
# DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
|
||||
hostname: {{ hostname }}
|
||||
|
||||
# http related config
|
||||
{% if http is defined %}
|
||||
http:
|
||||
# port for http, default is 80. If https enabled, this port will redirect to https port
|
||||
port: {{ http.port }}
|
||||
{% else %}
|
||||
# http:
|
||||
# # port for http, default is 80. If https enabled, this port will redirect to https port
|
||||
# port: 80
|
||||
{% endif %}
|
||||
|
||||
{% if https is defined %}
|
||||
# https related config
|
||||
https:
|
||||
# https port for harbor, default is 443
|
||||
port: {{ https.port }}
|
||||
# The path of cert and key files for nginx
|
||||
certificate: {{ https.certificate }}
|
||||
private_key: {{ https.private_key }}
|
||||
# enable strong ssl ciphers (default: false)
|
||||
{% if strong_ssl_ciphers is defined %}
|
||||
strong_ssl_ciphers: {{ strong_ssl_ciphers | lower }}
|
||||
{% else %}
|
||||
strong_ssl_ciphers: false
|
||||
{% endif %}
|
||||
{% else %}
|
||||
# https related config
|
||||
# https:
|
||||
# # https port for harbor, default is 443
|
||||
# port: 443
|
||||
# # The path of cert and key files for nginx
|
||||
# certificate: /your/certificate/path
|
||||
# private_key: /your/private/key/path
|
||||
# enable strong ssl ciphers (default: false)
|
||||
# strong_ssl_ciphers: false
|
||||
{% endif %}
|
||||
|
||||
# # Harbor will set ipv4 enabled only by default if this block is not configured
|
||||
# # Otherwise, please uncomment this block to configure your own ip_family stacks
|
||||
{% if ip_family is defined %}
|
||||
ip_family:
|
||||
# ipv6Enabled set to true if ipv6 is enabled in docker network, currently it affected the nginx related component
|
||||
{% if ip_family.ipv6 is defined %}
|
||||
ipv6:
|
||||
enabled: {{ ip_family.ipv6.enabled | lower }}
|
||||
{% else %}
|
||||
ipv6:
|
||||
enabled: false
|
||||
{% endif %}
|
||||
# ipv4Enabled set to true by default, currently it affected the nginx related component
|
||||
{% if ip_family.ipv4 is defined %}
|
||||
ipv4:
|
||||
enabled: {{ ip_family.ipv4.enabled | lower }}
|
||||
{% else %}
|
||||
ipv4:
|
||||
enabled: true
|
||||
{% endif %}
|
||||
{% else %}
|
||||
# ip_family:
|
||||
# # ipv6Enabled set to true if ipv6 is enabled in docker network, currently it affected the nginx related component
|
||||
# ipv6:
|
||||
# enabled: false
|
||||
# # ipv4Enabled set to true by default, currently it affected the nginx related component
|
||||
# ipv4:
|
||||
# enabled: true
|
||||
{% endif %}
|
||||
|
||||
{% if internal_tls is defined %}
|
||||
# Uncomment following will enable tls communication between all harbor components
|
||||
internal_tls:
|
||||
# set enabled to true means internal tls is enabled
|
||||
enabled: {{ internal_tls.enabled | lower }}
|
||||
{% if internal_tls.dir is defined %}
|
||||
# put your cert and key files on dir
|
||||
dir: {{ internal_tls.dir }}
|
||||
{% endif %}
|
||||
{% else %}
|
||||
# internal_tls:
|
||||
# # set enabled to true means internal tls is enabled
|
||||
# enabled: true
|
||||
# # put your cert and key files on dir
|
||||
# dir: /etc/harbor/tls/internal
|
||||
{% endif %}
|
||||
|
||||
# Uncomment external_url if you want to enable external proxy
|
||||
# And when it enabled the hostname will no longer used
|
||||
{% if external_url is defined %}
|
||||
external_url: {{ external_url }}
|
||||
{% else %}
|
||||
# external_url: https://reg.mydomain.com:8433
|
||||
{% endif %}
|
||||
|
||||
# The initial password of Harbor admin
|
||||
# It only works in first time to install harbor
|
||||
# Remember Change the admin password from UI after launching Harbor.
|
||||
{% if harbor_admin_password is defined %}
|
||||
harbor_admin_password: {{ harbor_admin_password }}
|
||||
{% else %}
|
||||
harbor_admin_password: Harbor12345
|
||||
{% endif %}
|
||||
|
||||
# Harbor DB configuration
|
||||
database:
|
||||
{% if database is defined %}
|
||||
# The password for the root user of Harbor DB. Change this before any production use.
|
||||
password: {{ database.password}}
|
||||
# The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
|
||||
max_idle_conns: {{ database.max_idle_conns }}
|
||||
# The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
|
||||
# Note: the default number of connections is 1024 for postgres of harbor.
|
||||
max_open_conns: {{ database.max_open_conns }}
|
||||
# The maximum amount of time a connection may be reused. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's age.
|
||||
# The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
||||
{% if database.conn_max_lifetime is defined %}
|
||||
conn_max_lifetime: {{ database.conn_max_lifetime }}
|
||||
{% else %}
|
||||
conn_max_lifetime: 5m
|
||||
{% endif %}
|
||||
# The maximum amount of time a connection may be idle. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's idle time.
|
||||
# The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
||||
{% if database.conn_max_idle_time is defined %}
|
||||
conn_max_idle_time: {{ database.conn_max_idle_time }}
|
||||
{% else %}
|
||||
conn_max_idle_time: 0
|
||||
{% endif %}
|
||||
{% else %}
|
||||
# The password for the root user of Harbor DB. Change this before any production use.
|
||||
password: root123
|
||||
# The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
|
||||
max_idle_conns: 100
|
||||
# The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
|
||||
# Note: the default number of connections is 1024 for postgres of harbor.
|
||||
max_open_conns: 900
|
||||
# The maximum amount of time a connection may be reused. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's age.
|
||||
# The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
||||
conn_max_lifetime: 5m
|
||||
# The maximum amount of time a connection may be idle. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's idle time.
|
||||
# The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
||||
conn_max_idle_time: 0
|
||||
{% endif %}
|
||||
|
||||
{% if data_volume is defined %}
|
||||
# The default data volume
|
||||
data_volume: {{ data_volume }}
|
||||
{% else %}
|
||||
# The default data volume
|
||||
data_volume: /data
|
||||
{% endif %}
|
||||
|
||||
# Harbor Storage settings by default is using /data dir on local filesystem
|
||||
# Uncomment storage_service setting If you want to using external storage
|
||||
{% if storage_service is defined %}
|
||||
storage_service:
|
||||
{% for key, value in storage_service.items() %}
|
||||
{% if key == 'ca_bundle' %}
|
||||
# # ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
|
||||
# # of registry's and chart repository's containers. This is usually needed when the user hosts a internal storage with self signed certificate.
|
||||
ca_bundle: {{ value if value is not none else '' }}
|
||||
{% elif key == 'redirect' %}
|
||||
# # set disable to true when you want to disable registry redirect
|
||||
redirect:
|
||||
{% if storage_service.redirect.disabled is defined %}
|
||||
disable: {{ storage_service.redirect.disabled | lower}}
|
||||
{% else %}
|
||||
disable: {{ storage_service.redirect.disable | lower}}
|
||||
{% endif %}
|
||||
{% else %}
|
||||
# # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss
|
||||
# # for more info about this configuration please refer https://distribution.github.io/distribution/about/configuration/
|
||||
# # and https://distribution.github.io/distribution/storage-drivers/
|
||||
{{ key }}:
|
||||
{% for k, v in value.items() %}
|
||||
{{ k }}: {{ v if v is not none else '' }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{% else %}
|
||||
# storage_service:
|
||||
# # ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
|
||||
# # of registry's and chart repository's containers. This is usually needed when the user hosts a internal storage with self signed certificate.
|
||||
# ca_bundle:
|
||||
|
||||
# # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss
|
||||
# # for more info about this configuration please refer https://distribution.github.io/distribution/about/configuration/
|
||||
# # and https://distribution.github.io/distribution/storage-drivers/
|
||||
# filesystem:
|
||||
# maxthreads: 100
|
||||
# # set disable to true when you want to disable registry redirect
|
||||
# redirect:
|
||||
# disable: false
|
||||
{% endif %}
|
||||
|
||||
# Trivy configuration
|
||||
#
|
||||
# Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases.
|
||||
# It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached
|
||||
# in the local file system. In addition, the database contains the update timestamp so Trivy can detect whether it
|
||||
# should download a newer version from the Internet or use the cached one. Currently, the database is updated every
|
||||
# 12 hours and published as a new release to GitHub.
|
||||
{% if trivy is defined %}
|
||||
trivy:
|
||||
# ignoreUnfixed The flag to display only fixed vulnerabilities
|
||||
{% if trivy.ignore_unfixed is defined %}
|
||||
ignore_unfixed: {{ trivy.ignore_unfixed | lower }}
|
||||
{% else %}
|
||||
ignore_unfixed: false
|
||||
{% endif %}
|
||||
# skipUpdate The flag to enable or disable Trivy DB downloads from GitHub
|
||||
#
|
||||
# You might want to enable this flag in test or CI/CD environments to avoid GitHub rate limiting issues.
|
||||
# If the flag is enabled you have to download the `trivy-offline.tar.gz` archive manually, extract `trivy.db` and
|
||||
# `metadata.json` files and mount them in the `/home/scanner/.cache/trivy/db` path.
|
||||
{% if trivy.skip_update is defined %}
|
||||
skip_update: {{ trivy.skip_update | lower }}
|
||||
{% else %}
|
||||
skip_update: false
|
||||
{% endif %}
|
||||
{% if trivy.skip_java_db_update is defined %}
|
||||
# skipJavaDBUpdate If the flag is enabled you have to manually download the `trivy-java.db` file and mount it in the
|
||||
# `/home/scanner/.cache/trivy/java-db/trivy-java.db` path
|
||||
skip_java_db_update: {{ trivy.skip_java_db_update | lower }}
|
||||
{% else %}
|
||||
skip_java_db_update: false
|
||||
{% endif %}
|
||||
#
|
||||
{% if trivy.offline_scan is defined %}
|
||||
offline_scan: {{ trivy.offline_scan | lower }}
|
||||
{% else %}
|
||||
offline_scan: false
|
||||
{% endif %}
|
||||
#
|
||||
# Comma-separated list of what security issues to detect. Possible values are `vuln`, `config` and `secret`. Defaults to `vuln`.
|
||||
{% if trivy.security_check is defined %}
|
||||
security_check: {{ trivy.security_check }}
|
||||
{% else %}
|
||||
security_check: vuln
|
||||
{% endif %}
|
||||
#
|
||||
# insecure The flag to skip verifying registry certificate
|
||||
{% if trivy.insecure is defined %}
|
||||
insecure: {{ trivy.insecure | lower }}
|
||||
{% else %}
|
||||
insecure: false
|
||||
{% endif %}
|
||||
#
|
||||
{% if trivy.timeout is defined %}
|
||||
# timeout The duration to wait for scan completion.
|
||||
# There is upper bound of 30 minutes defined in scan job. So if this `timeout` is larger than 30m0s, it will also timeout at 30m0s.
|
||||
timeout: {{ trivy.timeout}}
|
||||
{% else %}
|
||||
timeout: 5m0s
|
||||
{% endif %}
|
||||
#
|
||||
# github_token The GitHub access token to download Trivy DB
|
||||
#
|
||||
# Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
|
||||
# for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
|
||||
# requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
|
||||
# https://developer.github.com/v3/#rate-limiting
|
||||
#
|
||||
# You can create a GitHub token by following the instructions in
|
||||
# https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
|
||||
#
|
||||
{% if trivy.github_token is defined %}
|
||||
github_token: {{ trivy.github_token }}
|
||||
{% else %}
|
||||
# github_token: xxx
|
||||
{% endif %}
|
||||
{% else %}
|
||||
# trivy:
|
||||
# # ignoreUnfixed The flag to display only fixed vulnerabilities
|
||||
# ignore_unfixed: false
|
||||
# # skipUpdate The flag to enable or disable Trivy DB downloads from GitHub
|
||||
# #
|
||||
# # You might want to enable this flag in test or CI/CD environments to avoid GitHub rate limiting issues.
|
||||
# # If the flag is enabled you have to download the `trivy-offline.tar.gz` archive manually, extract `trivy.db` and
|
||||
# # `metadata.json` files and mount them in the `/home/scanner/.cache/trivy/db` path.
|
||||
# skip_update: false
|
||||
# #
|
||||
# # skipJavaDBUpdate If the flag is enabled you have to manually download the `trivy-java.db` file and mount it in the
|
||||
# # `/home/scanner/.cache/trivy/java-db/trivy-java.db` path
|
||||
# skip_java_db_update: false
|
||||
# #
|
||||
# #The offline_scan option prevents Trivy from sending API requests to identify dependencies.
|
||||
# # Scanning JAR files and pom.xml may require Internet access for better detection, but this option tries to avoid it.
|
||||
# # For example, the offline mode will not try to resolve transitive dependencies in pom.xml when the dependency doesn't
|
||||
# # exist in the local repositories. It means a number of detected vulnerabilities might be fewer in offline mode.
|
||||
# # It would work if all the dependencies are in local.
|
||||
# # This option doesn’t affect DB download. You need to specify "skip-update" as well as "offline-scan" in an air-gapped environment.
|
||||
# offline_scan: false
|
||||
# #
|
||||
# # insecure The flag to skip verifying registry certificate
|
||||
# insecure: false
|
||||
# # github_token The GitHub access token to download Trivy DB
|
||||
# #
|
||||
# # Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
|
||||
# # for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
|
||||
# # requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
|
||||
# # https://developer.github.com/v3/#rate-limiting
|
||||
# #
|
||||
# # timeout The duration to wait for scan completion.
|
||||
# # There is upper bound of 30 minutes defined in scan job. So if this `timeout` is larger than 30m0s, it will also timeout at 30m0s.
|
||||
# timeout: 5m0s
|
||||
# #
|
||||
# # You can create a GitHub token by following the instructions in
|
||||
# # https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
|
||||
# #
|
||||
# # github_token: xxx
|
||||
{% endif %}
|
||||
|
||||
jobservice:
|
||||
# Maximum number of job workers in job service
|
||||
{% if jobservice is defined %}
|
||||
max_job_workers: {{ jobservice.max_job_workers }}
|
||||
# The jobLoggers backend name, only support "STD_OUTPUT", "FILE" and/or "DB"
|
||||
{% if jobservice.job_loggers is defined %}
|
||||
job_loggers:
|
||||
{% for job_logger in jobservice.job_loggers %}
|
||||
- {{job_logger}}
|
||||
{% endfor %}
|
||||
{% else %}
|
||||
job_loggers:
|
||||
- STD_OUTPUT
|
||||
- FILE
|
||||
# - DB
|
||||
{% endif %}
|
||||
# The jobLogger sweeper duration (ignored if `jobLogger` is `stdout`)
|
||||
{% if jobservice.logger_sweeper_duration is defined %}
|
||||
logger_sweeper_duration: {{ jobservice.logger_sweeper_duration }}
|
||||
{% else %}
|
||||
logger_sweeper_duration: 1
|
||||
{% endif %}
|
||||
{% else %}
|
||||
max_job_workers: 10
|
||||
# The jobLoggers backend name, only support "STD_OUTPUT", "FILE" and/or "DB"
|
||||
job_loggers:
|
||||
- STD_OUTPUT
|
||||
- FILE
|
||||
# - DB
|
||||
# The jobLogger sweeper duration (ignored if `jobLogger` is `stdout`)
|
||||
logger_sweeper_duration: 1
|
||||
{% endif %}
|
||||
|
||||
notification:
|
||||
# Maximum retry count for webhook job
|
||||
{% if notification is defined %}
|
||||
webhook_job_max_retry: {{ notification.webhook_job_max_retry}}
|
||||
# HTTP client timeout for webhook job
|
||||
{% if notification.webhook_job_http_client_timeout is defined %}
|
||||
webhook_job_http_client_timeout: {{ notification.webhook_job_http_client_timeout }}
|
||||
{% else %}
|
||||
webhook_job_http_client_timeout: 3 #seconds
|
||||
{% endif %}
|
||||
{% else %}
|
||||
webhook_job_max_retry: 3
|
||||
# HTTP client timeout for webhook job
|
||||
webhook_job_http_client_timeout: 3 #seconds
|
||||
{% endif %}
|
||||
|
||||
# Log configurations
|
||||
log:
|
||||
# options are debug, info, warning, error, fatal
|
||||
{% if log is defined %}
|
||||
level: {{ log.level }}
|
||||
# configs for logs in local storage
|
||||
local:
|
||||
# Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
|
||||
rotate_count: {{ log.local.rotate_count }}
|
||||
# Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
|
||||
# If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
|
||||
# are all valid.
|
||||
rotate_size: {{ log.local.rotate_size }}
|
||||
# The directory on your host that store log
|
||||
location: {{ log.local.location }}
|
||||
{% if log.external_endpoint is defined %}
|
||||
external_endpoint:
|
||||
# protocol used to transmit log to external endpoint, options is tcp or udp
|
||||
protocol: {{ log.external_endpoint.protocol }}
|
||||
# The host of external endpoint
|
||||
host: {{ log.external_endpoint.host }}
|
||||
# Port of external endpoint
|
||||
port: {{ log.external_endpoint.port }}
|
||||
{% else %}
|
||||
# Uncomment following lines to enable external syslog endpoint.
|
||||
# external_endpoint:
|
||||
# # protocol used to transmit log to external endpoint, options is tcp or udp
|
||||
# protocol: tcp
|
||||
# # The host of external endpoint
|
||||
# host: localhost
|
||||
# # Port of external endpoint
|
||||
# port: 5140
|
||||
{% endif %}
|
||||
{% else %}
|
||||
level: info
|
||||
# configs for logs in local storage
|
||||
local:
|
||||
# Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
|
||||
rotate_count: 50
|
||||
# Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
|
||||
# If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
|
||||
# are all valid.
|
||||
rotate_size: 200M
|
||||
# The directory on your host that store log
|
||||
location: /var/log/harbor
|
||||
|
||||
# Uncomment following lines to enable external syslog endpoint.
|
||||
# external_endpoint:
|
||||
# # protocol used to transmit log to external endpoint, options is tcp or udp
|
||||
# protocol: tcp
|
||||
# # The host of external endpoint
|
||||
# host: localhost
|
||||
# # Port of external endpoint
|
||||
# port: 5140
|
||||
{% endif %}
|
||||
|
||||
|
||||
#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
|
||||
_version: 2.12.0
|
||||
{% if external_database is defined %}
|
||||
# Uncomment external_database if using external database.
|
||||
external_database:
|
||||
harbor:
|
||||
host: {{ external_database.harbor.host }}
|
||||
port: {{ external_database.harbor.port }}
|
||||
db_name: {{ external_database.harbor.db_name }}
|
||||
username: {{ external_database.harbor.username }}
|
||||
password: {{ external_database.harbor.password }}
|
||||
ssl_mode: {{ external_database.harbor.ssl_mode }}
|
||||
max_idle_conns: {{ external_database.harbor.max_idle_conns}}
|
||||
max_open_conns: {{ external_database.harbor.max_open_conns}}
|
||||
{% else %}
|
||||
# Uncomment external_database if using external database.
|
||||
# external_database:
|
||||
# harbor:
|
||||
# host: harbor_db_host
|
||||
# port: harbor_db_port
|
||||
# db_name: harbor_db_name
|
||||
# username: harbor_db_username
|
||||
# password: harbor_db_password
|
||||
# ssl_mode: disable
|
||||
# max_idle_conns: 2
|
||||
# max_open_conns: 0
|
||||
{% endif %}
|
||||
|
||||
{% if redis is defined %}
|
||||
redis:
|
||||
# # db_index 0 is for core, it's unchangeable
|
||||
{% if redis.registry_db_index is defined %}
|
||||
registry_db_index: {{ redis.registry_db_index }}
|
||||
{% else %}
|
||||
# # registry_db_index: 1
|
||||
{% endif %}
|
||||
{% if redis.jobservice_db_index is defined %}
|
||||
jobservice_db_index: {{ redis.jobservice_db_index }}
|
||||
{% else %}
|
||||
# # jobservice_db_index: 2
|
||||
{% endif %}
|
||||
{% if redis.trivy_db_index is defined %}
|
||||
trivy_db_index: {{ redis.trivy_db_index }}
|
||||
{% else %}
|
||||
# # trivy_db_index: 5
|
||||
{% endif %}
|
||||
{% if redis.harbor_db_index is defined %}
|
||||
harbor_db_index: {{ redis.harbor_db_index }}
|
||||
{% else %}
|
||||
# # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
|
||||
# # harbor_db_index: 6
|
||||
{% endif %}
|
||||
{% if redis.cache_layer_db_index is defined %}
|
||||
cache_layer_db_index: {{ redis.cache_layer_db_index }}
|
||||
{% else %}
|
||||
# # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
|
||||
# # cache_layer_db_index: 7
|
||||
{% endif %}
|
||||
{% else %}
|
||||
# Uncomment redis if need to customize redis db
|
||||
# redis:
|
||||
# # db_index 0 is for core, it's unchangeable
|
||||
# # registry_db_index: 1
|
||||
# # jobservice_db_index: 2
|
||||
# # trivy_db_index: 5
|
||||
# # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
|
||||
# # harbor_db_index: 6
|
||||
# # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
|
||||
# # cache_layer_db_index: 7
|
||||
{% endif %}
|
||||
|
||||
{% if external_redis is defined %}
|
||||
external_redis:
|
||||
# support redis, redis+sentinel
|
||||
# host for redis: <host_redis>:<port_redis>
|
||||
# host for redis+sentinel:
|
||||
# <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
|
||||
host: {{ external_redis.host }}
|
||||
password: {{ external_redis.password }}
|
||||
# Redis AUTH command was extended in Redis 6, it is possible to use it in the two-arguments AUTH <username> <password> form.
|
||||
{% if external_redis.username is defined %}
|
||||
username: {{ external_redis.username }}
|
||||
{% else %}
|
||||
# username:
|
||||
{% endif %}
|
||||
# sentinel_master_set must be set to support redis+sentinel
|
||||
#sentinel_master_set:
|
||||
# db_index 0 is for core, it's unchangeable
|
||||
registry_db_index: {{ external_redis.registry_db_index }}
|
||||
jobservice_db_index: {{ external_redis.jobservice_db_index }}
|
||||
trivy_db_index: 5
|
||||
idle_timeout_seconds: 30
|
||||
{% if external_redis.harbor_db_index is defined %}
|
||||
harbor_db_index: {{ external_redis.harbor_db_index }}
|
||||
{% else %}
|
||||
# # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
|
||||
# # harbor_db_index: 6
|
||||
{% endif %}
|
||||
{% if external_redis.cache_layer_db_index is defined %}
|
||||
cache_layer_db_index: {{ external_redis.cache_layer_db_index }}
|
||||
{% else %}
|
||||
# # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
|
||||
# # cache_layer_db_index: 7
|
||||
{% endif %}
|
||||
{% else %}
|
||||
# Uncomment external_redis if using external Redis server
|
||||
# external_redis:
|
||||
# # support redis, redis+sentinel
|
||||
# # host for redis: <host_redis>:<port_redis>
|
||||
# # host for redis+sentinel:
|
||||
# # <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
|
||||
# host: redis:6379
|
||||
# password:
|
||||
# # Redis AUTH command was extended in Redis 6, it is possible to use it in the two-arguments AUTH <username> <password> form.
|
||||
# # username:
|
||||
# # sentinel_master_set must be set to support redis+sentinel
|
||||
# #sentinel_master_set:
|
||||
# # db_index 0 is for core, it's unchangeable
|
||||
# registry_db_index: 1
|
||||
# jobservice_db_index: 2
|
||||
# trivy_db_index: 5
|
||||
# idle_timeout_seconds: 30
|
||||
# # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
|
||||
# # harbor_db_index: 6
|
||||
# # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
|
||||
# # cache_layer_db_index: 7
|
||||
{% endif %}
|
||||
|
||||
{% if uaa is defined %}
|
||||
# Uncomment uaa for trusting the certificate of uaa instance that is hosted via self-signed cert.
|
||||
uaa:
|
||||
ca_file: {{ uaa.ca_file }}
|
||||
{% else %}
|
||||
# Uncomment uaa for trusting the certificate of uaa instance that is hosted via self-signed cert.
|
||||
# uaa:
|
||||
# ca_file: /path/to/ca
|
||||
{% endif %}
|
||||
|
||||
|
||||
# Global proxy
|
||||
# Config http proxy for components, e.g. http://my.proxy.com:3128
|
||||
# Components don't need to connect to each other via http proxy.
|
||||
# Remove a component from the `components` array if you want to disable the proxy
|
||||
# for it. If you want to use the proxy for replication, you MUST enable the proxy
|
||||
# for core and jobservice, and set `http_proxy` and `https_proxy`.
|
||||
# Add a domain to the `no_proxy` field when you want to disable the proxy
|
||||
# for some special registry.
|
||||
{% if proxy is defined %}
|
||||
proxy:
|
||||
http_proxy: {{ proxy.http_proxy or ''}}
|
||||
https_proxy: {{ proxy.https_proxy or ''}}
|
||||
no_proxy: {{ proxy.no_proxy or ''}}
|
||||
{% if proxy.components is defined %}
|
||||
components:
|
||||
{% for component in proxy.components %}
|
||||
{% if component != 'clair' %}
|
||||
- {{component}}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% else %}
|
||||
proxy:
|
||||
http_proxy:
|
||||
https_proxy:
|
||||
no_proxy:
|
||||
components:
|
||||
- core
|
||||
- jobservice
|
||||
- trivy
|
||||
{% endif %}
|
||||
|
||||
{% if metric is defined %}
|
||||
metric:
|
||||
enabled: {{ metric.enabled }}
|
||||
port: {{ metric.port }}
|
||||
path: {{ metric.path }}
|
||||
{% else %}
|
||||
# metric:
|
||||
# enabled: false
|
||||
# port: 9090
|
||||
# path: /metrics
|
||||
{% endif %}
|
||||
|
||||
# Trace related config
|
||||
# only can enable one trace provider(jaeger or otel) at the same time,
|
||||
# and when using jaeger as provider, can only enable it with agent mode or collector mode.
|
||||
# if using jaeger collector mode, uncomment endpoint and uncomment username, password if needed
|
||||
# if using jaeger agent mode uncomment agent_host and agent_port
|
||||
{% if trace is defined %}
|
||||
trace:
|
||||
enabled: {{ trace.enabled | lower}}
|
||||
sample_rate: {{ trace.sample_rate }}
|
||||
# # namespace used to differentiate different harbor services
|
||||
{% if trace.namespace is defined %}
|
||||
namespace: {{ trace.namespace }}
|
||||
{% else %}
|
||||
# namespace:
|
||||
{% endif %}
|
||||
# # attributes is a key value dict contains user defined attributes used to initialize trace provider
|
||||
{% if trace.attributes is defined%}
|
||||
attributes:
|
||||
{% for name, value in trace.attributes.items() %}
|
||||
{{name}}: {{value}}
|
||||
{% endfor %}
|
||||
{% else %}
|
||||
# attributes:
|
||||
# application: harbor
|
||||
{% endif %}
|
||||
{% if trace.jaeger is defined%}
|
||||
jaeger:
|
||||
endpoint: {{trace.jaeger.endpoint or '' }}
|
||||
username: {{trace.jaeger.username or ''}}
|
||||
password: {{trace.jaeger.password or ''}}
|
||||
agent_host: {{trace.jaeger.agent_host or ''}}
|
||||
agent_port: {{trace.jaeger.agent_port or ''}}
|
||||
{% else %}
|
||||
# jaeger:
|
||||
# endpoint:
|
||||
# username:
|
||||
# password:
|
||||
# agent_host:
|
||||
# agent_port:
|
||||
{% endif %}
|
||||
{% if trace.otel is defined %}
|
||||
otel:
|
||||
endpoint: {{trace.otel.endpoint or '' }}
|
||||
url_path: {{trace.otel.url_path or '' }}
|
||||
compression: {{trace.otel.compression | lower }}
|
||||
insecure: {{trace.otel.insecure | lower }}
|
||||
timeout: {{trace.otel.timeout or '' }}
|
||||
{% else %}
|
||||
# otel:
|
||||
# endpoint: hostname:4318
|
||||
# url_path: /v1/traces
|
||||
# compression: false
|
||||
# insecure: true
|
||||
# # timeout is in seconds
|
||||
# timeout: 10
|
||||
{% endif%}
|
||||
{% else %}
|
||||
# trace:
|
||||
# enabled: true
|
||||
# # set sample_rate to 1 if you wanna sampling 100% of trace data; set 0.5 if you wanna sampling 50% of trace data, and so forth
|
||||
# sample_rate: 1
|
||||
# # # namespace used to differentiate different harbor services
|
||||
# # namespace:
|
||||
# # # attributes is a key value dict contains user defined attributes used to initialize trace provider
|
||||
# # attributes:
|
||||
# # application: harbor
|
||||
# # jaeger:
|
||||
# # endpoint: http://hostname:14268/api/traces
|
||||
# # username:
|
||||
# # password:
|
||||
# # agent_host: hostname
|
||||
# # agent_port: 6831
|
||||
# # otel:
|
||||
# # endpoint: hostname:4318
|
||||
# # url_path: /v1/traces
|
||||
# # compression: false
|
||||
# # insecure: true
|
||||
# # # timeout is in seconds
|
||||
# # timeout: 10
|
||||
{% endif %}
|
||||
|
||||
# enable purge _upload directories
|
||||
{% if upload_purging is defined %}
|
||||
upload_purging:
|
||||
enabled: {{ upload_purging.enabled | lower}}
|
||||
age: {{ upload_purging.age }}
|
||||
interval: {{ upload_purging.interval }}
|
||||
dryrun: {{ upload_purging.dryrun | lower}}
|
||||
{% else %}
|
||||
upload_purging:
|
||||
enabled: true
|
||||
# remove files in _upload directories which exist for a period of time, default is one week.
|
||||
age: 168h
|
||||
# the interval of the purge operations
|
||||
interval: 24h
|
||||
dryrun: false
|
||||
{% endif %}
|
||||
|
||||
# Cache layer related config
|
||||
{% if cache is defined %}
|
||||
cache:
|
||||
enabled: {{ cache.enabled | lower}}
|
||||
expire_hours: {{ cache.expire_hours }}
|
||||
{% else %}
|
||||
cache:
|
||||
enabled: false
|
||||
expire_hours: 24
|
||||
{% endif %}
|
||||
|
||||
# Harbor core configurations
|
||||
# Uncomment to enable the following harbor core related configuration items.
|
||||
{% if core is defined %}
|
||||
core:
|
||||
# The provider for updating project quota(usage), there are 2 options, redis or db,
|
||||
# by default is implemented by db but you can switch the updation via redis which
|
||||
# can improve the performance of high concurrent pushing to the same project,
|
||||
# and reduce the database connections spike and occupies.
|
||||
# By redis will bring up some delay for quota usage updation for display, so only
|
||||
# suggest switch provider to redis if you were ran into the db connections spike around
|
||||
# the scenario of high concurrent pushing to same project, no improvement for other scenes.
|
||||
quota_update_provider: {{ core.quota_update_provider }}
|
||||
{% else %}
|
||||
# core:
|
||||
# # The provider for updating project quota(usage), there are 2 options, redis or db,
|
||||
# # by default is implemented by db but you can switch the updation via redis which
|
||||
# # can improve the performance of high concurrent pushing to the same project,
|
||||
# # and reduce the database connections spike and occupies.
|
||||
# # By redis will bring up some delay for quota usage updation for display, so only
|
||||
# # suggest switch provider to redis if you were ran into the db connections spike around
|
||||
# # the scenario of high concurrent pushing to same project, no improvement for other scenes.
|
||||
# quota_update_provider: redis # Or db
|
||||
{% endif %}
|
@ -40,7 +40,7 @@ REGISTRY_CREDENTIAL_USERNAME={{registry_username}}
|
||||
REGISTRY_CREDENTIAL_PASSWORD={{registry_password}}
|
||||
CSRF_KEY={{csrf_key}}
|
||||
ROBOT_SCANNER_NAME_PREFIX={{scan_robot_prefix}}
|
||||
PERMITTED_REGISTRY_TYPES_FOR_PROXY_CACHE=docker-hub,harbor,azure-acr,aws-ecr,google-gcr,quay,docker-registry,github-ghcr,jfrog-artifactory
|
||||
PERMITTED_REGISTRY_TYPES_FOR_PROXY_CACHE=docker-hub,harbor,azure-acr,ali-acr,aws-ecr,google-gcr,quay,docker-registry,github-ghcr,jfrog-artifactory
|
||||
|
||||
HTTP_PROXY={{core_http_proxy}}
|
||||
HTTPS_PROXY={{core_https_proxy}}
|
||||
|
@ -1,4 +1,3 @@
|
||||
version: '2.3'
|
||||
services:
|
||||
log:
|
||||
image: goharbor/harbor-log:{{version}}
|
||||
|
@ -101,6 +101,9 @@ http {
|
||||
|
||||
proxy_buffering off;
|
||||
proxy_request_buffering off;
|
||||
|
||||
proxy_send_timeout 900;
|
||||
proxy_read_timeout 900;
|
||||
}
|
||||
|
||||
location /api/ {
|
||||
|
@ -1,4 +1,4 @@
|
||||
FROM golang:1.22.3
|
||||
FROM golang:1.23.2
|
||||
|
||||
ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution
|
||||
ENV BUILDTAGS include_oss include_gcs
|
||||
|
@ -7,7 +7,13 @@ if [ -z $1 ]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -z $2 ]; then
|
||||
error "Please set the 'distribution_src' variable"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
VERSION="$1"
|
||||
DISTRIBUTION_SRC="$2"
|
||||
|
||||
set -e
|
||||
|
||||
@ -20,7 +26,7 @@ cur=$PWD
|
||||
|
||||
# the temp folder to store distribution source code...
|
||||
TEMP=`mktemp -d ${TMPDIR-/tmp}/distribution.XXXXXX`
|
||||
git clone -b $VERSION https://github.com/distribution/distribution.git $TEMP
|
||||
git clone -b $VERSION $DISTRIBUTION_SRC $TEMP
|
||||
|
||||
# add patch redis
|
||||
cd $TEMP
|
||||
|
@ -1,7 +1,7 @@
|
||||
FROM golang:1.22.3
|
||||
FROM golang:1.23.2
|
||||
|
||||
ADD . /go/src/github.com/aquasecurity/harbor-scanner-trivy/
|
||||
WORKDIR /go/src/github.com/aquasecurity/harbor-scanner-trivy/
|
||||
ADD . /go/src/github.com/goharbor/harbor-scanner-trivy/
|
||||
WORKDIR /go/src/github.com/goharbor/harbor-scanner-trivy/
|
||||
|
||||
RUN export GOOS=linux GO111MODULE=on CGO_ENABLED=0 && \
|
||||
go build -o scanner-trivy cmd/scanner-trivy/main.go
|
||||
|
@ -16,16 +16,16 @@ cur=$PWD
|
||||
|
||||
# The temporary directory to clone Trivy adapter source code
|
||||
TEMP=$(mktemp -d ${TMPDIR-/tmp}/trivy-adapter.XXXXXX)
|
||||
git clone https://github.com/aquasecurity/harbor-scanner-trivy.git $TEMP
|
||||
git clone https://github.com/goharbor/harbor-scanner-trivy.git $TEMP
|
||||
cd $TEMP; git checkout $VERSION; cd -
|
||||
|
||||
echo "Building Trivy adapter binary based on golang:1.22.3..."
|
||||
echo "Building Trivy adapter binary based on golang:1.23.2..."
|
||||
cp Dockerfile.binary $TEMP
|
||||
docker build -f $TEMP/Dockerfile.binary -t trivy-adapter-golang $TEMP
|
||||
|
||||
echo "Copying Trivy adapter binary from the container to the local directory..."
|
||||
ID=$(docker create trivy-adapter-golang)
|
||||
docker cp $ID:/go/src/github.com/aquasecurity/harbor-scanner-trivy/scanner-trivy binary
|
||||
docker cp $ID:/go/src/github.com/goharbor/harbor-scanner-trivy/scanner-trivy binary
|
||||
|
||||
docker rm -f $ID
|
||||
docker rmi -f trivy-adapter-golang
|
||||
|
@ -50,12 +50,19 @@ fi
|
||||
secret_dir=${data_path}/secret
|
||||
config_dir=$harbor_prepare_path/common/config
|
||||
|
||||
# Set the prepare base dir, for mac, it should be $HOME, for linux, it should be /
|
||||
# The certificate and the data directory in harbor.yaml should be sub directories of $HOME when installing Harbor in MacOS
|
||||
prepare_base_dir=/
|
||||
if [ "$(uname)" == "Darwin" ]; then
|
||||
prepare_base_dir=$HOME
|
||||
fi
|
||||
|
||||
# Run prepare script
|
||||
docker run --rm -v $input_dir:/input \
|
||||
-v $data_path:/data \
|
||||
-v $harbor_prepare_path:/compose_location \
|
||||
-v $config_dir:/config \
|
||||
-v /:/hostfs \
|
||||
-v ${prepare_base_dir}:/hostfs${prepare_base_dir} \
|
||||
--privileged \
|
||||
goharbor/prepare:dev prepare $@
|
||||
|
||||
|
569
src/.mockery.yaml
Normal file
569
src/.mockery.yaml
Normal file
@ -0,0 +1,569 @@
|
||||
with-expecter: false
|
||||
outpkg: "{{.PackageName}}"
|
||||
mockname: "{{.InterfaceName}}"
|
||||
filename: "{{.InterfaceName | snakecase}}.go"
|
||||
packages:
|
||||
# controller related mocks
|
||||
github.com/goharbor/harbor/src/controller/artifact:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/artifact
|
||||
github.com/goharbor/harbor/src/controller/artifact/processor:
|
||||
interfaces:
|
||||
Processor:
|
||||
config:
|
||||
dir: testing/pkg/processor
|
||||
github.com/goharbor/harbor/src/controller/artifact/annotation:
|
||||
interfaces:
|
||||
Parser:
|
||||
config:
|
||||
dir: testing/pkg/parser
|
||||
outpkg: parser
|
||||
github.com/goharbor/harbor/src/controller/blob:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/blob
|
||||
github.com/goharbor/harbor/src/controller/project:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/project
|
||||
github.com/goharbor/harbor/src/controller/quota:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/quota
|
||||
github.com/goharbor/harbor/src/controller/scan:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/scan
|
||||
Checker:
|
||||
config:
|
||||
dir: testing/controller/scan
|
||||
github.com/goharbor/harbor/src/controller/scanner:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/scanner
|
||||
github.com/goharbor/harbor/src/controller/replication:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/replication
|
||||
github.com/goharbor/harbor/src/controller/replication/flow:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: controller/replication
|
||||
outpkg: replication
|
||||
mockname: flowController
|
||||
filename: mock_flow_controller_test.go
|
||||
registryAdapter:
|
||||
config:
|
||||
dir: controller/replication/flow
|
||||
outpkg: flow
|
||||
mockname: mockAdapter
|
||||
filename: mock_adapter_test.go
|
||||
github.com/goharbor/harbor/src/controller/robot:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/robot
|
||||
github.com/goharbor/harbor/src/controller/proxy:
|
||||
interfaces:
|
||||
RemoteInterface:
|
||||
config:
|
||||
dir: testing/controller/proxy
|
||||
github.com/goharbor/harbor/src/controller/retention:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/retention
|
||||
github.com/goharbor/harbor/src/controller/config:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/config
|
||||
github.com/goharbor/harbor/src/controller/user:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/user
|
||||
github.com/goharbor/harbor/src/controller/repository:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/repository
|
||||
github.com/goharbor/harbor/src/controller/purge:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/purge
|
||||
github.com/goharbor/harbor/src/controller/jobservice:
|
||||
interfaces:
|
||||
SchedulerController:
|
||||
config:
|
||||
dir: testing/controller/jobservice
|
||||
github.com/goharbor/harbor/src/controller/systemartifact:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/systemartifact
|
||||
github.com/goharbor/harbor/src/controller/scandataexport:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/scandataexport
|
||||
github.com/goharbor/harbor/src/controller/task:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/task
|
||||
ExecutionController:
|
||||
config:
|
||||
dir: testing/controller/task
|
||||
github.com/goharbor/harbor/src/controller/webhook:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/webhook
|
||||
github.com/goharbor/harbor/src/controller/securityhub:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/securityhub
|
||||
|
||||
# jobservice related mocks
|
||||
github.com/goharbor/harbor/src/jobservice/mgt:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: jobservice/mgt
|
||||
outpkg: mgt
|
||||
mockname: MockManager
|
||||
filename: mock_manager.go
|
||||
github.com/goharbor/harbor/src/jobservice/period:
|
||||
interfaces:
|
||||
Scheduler:
|
||||
config:
|
||||
dir: jobservice/period
|
||||
outpkg: period
|
||||
mockname: MockScheduler
|
||||
filename: mock_scheduler.go
|
||||
inpackage: True
|
||||
|
||||
# common and lib related mocks
|
||||
github.com/goharbor/harbor/src/lib/cache:
|
||||
interfaces:
|
||||
Cache:
|
||||
configs:
|
||||
- dir: lib/cache
|
||||
outpkg: cache
|
||||
mockname: mockCache
|
||||
filename: mock_cache_test.go
|
||||
inpackage: True
|
||||
- dir: testing/lib/cache
|
||||
Iterator:
|
||||
config:
|
||||
dir: testing/lib/cache
|
||||
github.com/goharbor/harbor/src/lib/orm:
|
||||
interfaces:
|
||||
Creator:
|
||||
config:
|
||||
dir: testing/lib/orm
|
||||
github.com/goharbor/harbor/src/lib/config:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/lib/config
|
||||
github.com/goharbor/harbor/src/common/job:
|
||||
interfaces:
|
||||
Client:
|
||||
config:
|
||||
dir: pkg/task
|
||||
outpkg: task
|
||||
mockname: mockJobserviceClient
|
||||
filename: mock_jobservice_client_test.go
|
||||
github.com/goharbor/harbor/src/common/security:
|
||||
interfaces:
|
||||
Context:
|
||||
config:
|
||||
dir: testing/common/security
|
||||
|
||||
# pkg related mocks
|
||||
github.com/goharbor/harbor/src/pkg/artifact:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/artifact
|
||||
github.com/goharbor/harbor/src/pkg/artifactrash:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/artifactrash
|
||||
github.com/goharbor/harbor/src/pkg/blob:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/blob
|
||||
github.com/goharbor/harbor/src/pkg/project:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/project
|
||||
github.com/goharbor/harbor/src/pkg/project/metadata:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/project/metadata
|
||||
github.com/goharbor/harbor/src/pkg/quota:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/quota
|
||||
github.com/goharbor/harbor/src/pkg/quota/driver:
|
||||
interfaces:
|
||||
Driver:
|
||||
config:
|
||||
dir: testing/pkg/quota/driver
|
||||
github.com/goharbor/harbor/src/pkg/scan:
|
||||
interfaces:
|
||||
Handler:
|
||||
config:
|
||||
dir: testing/pkg/scan
|
||||
github.com/goharbor/harbor/src/pkg/scan/postprocessors:
|
||||
interfaces:
|
||||
NativeScanReportConverter:
|
||||
config:
|
||||
dir: testing/pkg/scan/postprocessors
|
||||
github.com/goharbor/harbor/src/pkg/scan/report:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/scan/report
|
||||
github.com/goharbor/harbor/src/pkg/scan/rest/v1:
|
||||
config:
|
||||
dir: testing/pkg/scan/rest/v1
|
||||
all: True
|
||||
github.com/goharbor/harbor/src/pkg/scan/scanner:
|
||||
config:
|
||||
dir: testing/pkg/scan/scanner
|
||||
all: True
|
||||
github.com/goharbor/harbor/src/pkg/scheduler:
|
||||
interfaces:
|
||||
DAO:
|
||||
config:
|
||||
dir: pkg/scheduler
|
||||
outpkg: scheduler
|
||||
mockname: mockDAO
|
||||
filename: mock_dao_test.go
|
||||
inpackage: True
|
||||
Scheduler:
|
||||
config:
|
||||
dir: testing/pkg/scheduler
|
||||
github.com/goharbor/harbor/src/pkg/task:
|
||||
interfaces:
|
||||
Manager:
|
||||
configs:
|
||||
- dir: pkg/task
|
||||
outpkg: task
|
||||
mockname: mockTaskManager
|
||||
filename: mock_task_manager_test.go
|
||||
inpackage: True
|
||||
- dir: testing/pkg/task
|
||||
SweepManager:
|
||||
config:
|
||||
dir: pkg/task
|
||||
outpkg: task
|
||||
mockname: mockSweepManager
|
||||
filename: mock_sweep_manager_test.go
|
||||
inpackage: True
|
||||
ExecutionManager:
|
||||
config:
|
||||
dir: testing/pkg/task
|
||||
github.com/goharbor/harbor/src/pkg/task/dao:
|
||||
interfaces:
|
||||
TaskDAO:
|
||||
config:
|
||||
dir: pkg/task
|
||||
outpkg: task
|
||||
mockname: mockTaskDAO
|
||||
filename: mock_task_dao_test.go
|
||||
ExecutionDAO:
|
||||
config:
|
||||
dir: pkg/task
|
||||
outpkg: task
|
||||
mockname: mockExecutionDAO
|
||||
filename: mock_execution_dao_test.go
|
||||
github.com/goharbor/harbor/src/pkg/user:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/user
|
||||
github.com/goharbor/harbor/src/pkg/user/dao:
|
||||
interfaces:
|
||||
DAO:
|
||||
config:
|
||||
dir: testing/pkg/user/dao
|
||||
github.com/goharbor/harbor/src/pkg/oidc:
|
||||
interfaces:
|
||||
MetaManager:
|
||||
config:
|
||||
dir: testing/pkg/oidc
|
||||
github.com/goharbor/harbor/src/pkg/oidc/dao:
|
||||
interfaces:
|
||||
MetaDAO:
|
||||
config:
|
||||
dir: testing/pkg/oidc/dao
|
||||
github.com/goharbor/harbor/src/pkg/rbac:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/rbac
|
||||
github.com/goharbor/harbor/src/pkg/rbac/dao:
|
||||
interfaces:
|
||||
DAO:
|
||||
config:
|
||||
dir: testing/pkg/rbac/dao
|
||||
github.com/goharbor/harbor/src/pkg/robot:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/robot
|
||||
github.com/goharbor/harbor/src/pkg/robot/dao:
|
||||
interfaces:
|
||||
DAO:
|
||||
config:
|
||||
dir: testing/pkg/robot/dao
|
||||
github.com/goharbor/harbor/src/pkg/repository:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/repository
|
||||
github.com/goharbor/harbor/src/pkg/repository/dao:
|
||||
interfaces:
|
||||
DAO:
|
||||
config:
|
||||
dir: testing/pkg/repository/dao
|
||||
github.com/goharbor/harbor/src/pkg/notification/policy:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/notification/policy
|
||||
github.com/goharbor/harbor/src/pkg/notification/policy/dao:
|
||||
interfaces:
|
||||
DAO:
|
||||
config:
|
||||
dir: testing/pkg/notification/policy/dao
|
||||
github.com/goharbor/harbor/src/pkg/immutable/dao:
|
||||
interfaces:
|
||||
DAO:
|
||||
config:
|
||||
dir: testing/pkg/immutable/dao
|
||||
github.com/goharbor/harbor/src/pkg/immutable/match:
|
||||
interfaces:
|
||||
ImmutableTagMatcher:
|
||||
config:
|
||||
dir: testing/pkg/immutable
|
||||
filename: matcher.go
|
||||
outpkg: immutable
|
||||
mockname: FakeMatcher
|
||||
github.com/goharbor/harbor/src/pkg/ldap:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/ldap
|
||||
github.com/goharbor/harbor/src/pkg/allowlist:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/allowlist
|
||||
github.com/goharbor/harbor/src/pkg/allowlist/dao:
|
||||
interfaces:
|
||||
DAO:
|
||||
config:
|
||||
dir: testing/pkg/allowlist/dao
|
||||
github.com/goharbor/harbor/src/pkg/reg:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/reg
|
||||
github.com/goharbor/harbor/src/pkg/reg/dao:
|
||||
interfaces:
|
||||
DAO:
|
||||
config:
|
||||
dir: testing/pkg/reg/dao
|
||||
github.com/goharbor/harbor/src/pkg/reg/adapter:
|
||||
interfaces:
|
||||
Factory:
|
||||
config:
|
||||
dir: controller/replication/flow
|
||||
outpkg: flow
|
||||
mockname: mockFactory
|
||||
filename: mock_adapter_factory_test.go
|
||||
Adapter:
|
||||
config:
|
||||
dir: testing/pkg/reg/adapter
|
||||
github.com/goharbor/harbor/src/pkg/replication:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/replication
|
||||
github.com/goharbor/harbor/src/pkg/replication/dao:
|
||||
interfaces:
|
||||
DAO:
|
||||
config:
|
||||
dir: testing/pkg/replication/dao
|
||||
github.com/goharbor/harbor/src/pkg/label:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/label
|
||||
github.com/goharbor/harbor/src/pkg/label/dao:
|
||||
interfaces:
|
||||
DAO:
|
||||
config:
|
||||
dir: testing/pkg/label/dao
|
||||
github.com/goharbor/harbor/src/pkg/joblog:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/joblog
|
||||
github.com/goharbor/harbor/src/pkg/joblog/dao:
|
||||
interfaces:
|
||||
DAO:
|
||||
config:
|
||||
dir: testing/pkg/joblog/dao
|
||||
github.com/goharbor/harbor/src/pkg/accessory:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/accessory
|
||||
github.com/goharbor/harbor/src/pkg/accessory/dao:
|
||||
interfaces:
|
||||
DAO:
|
||||
config:
|
||||
dir: testing/pkg/accessory/dao
|
||||
github.com/goharbor/harbor/src/pkg/accessory/model:
|
||||
interfaces:
|
||||
Accessory:
|
||||
config:
|
||||
dir: testing/pkg/accessory/model
|
||||
github.com/goharbor/harbor/src/pkg/audit:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/audit
|
||||
github.com/goharbor/harbor/src/pkg/audit/dao:
|
||||
interfaces:
|
||||
DAO:
|
||||
config:
|
||||
dir: testing/pkg/audit/dao
|
||||
github.com/goharbor/harbor/src/pkg/systemartifact:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/systemartifact
|
||||
Selector:
|
||||
config:
|
||||
dir: testing/pkg/systemartifact/cleanup
|
||||
outpkg: cleanup
|
||||
github.com/goharbor/harbor/src/pkg/systemartifact/dao:
|
||||
interfaces:
|
||||
DAO:
|
||||
config:
|
||||
dir: testing/pkg/systemartifact/dao
|
||||
github.com/goharbor/harbor/src/pkg/cached/manifest/redis:
|
||||
interfaces:
|
||||
CachedManager:
|
||||
config:
|
||||
dir: testing/pkg/cached/manifest/redis
|
||||
github.com/goharbor/harbor/src/pkg/scan/export:
|
||||
interfaces:
|
||||
FilterProcessor:
|
||||
config:
|
||||
dir: testing/pkg/scan/export
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/scan/export
|
||||
ArtifactDigestCalculator:
|
||||
config:
|
||||
dir: testing/pkg/scan/export
|
||||
github.com/goharbor/harbor/src/pkg/scan/sbom:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/scan/sbom
|
||||
github.com/goharbor/harbor/src/pkg/registry:
|
||||
interfaces:
|
||||
Client:
|
||||
config:
|
||||
dir: testing/pkg/registry
|
||||
filename: fake_registry_client.go
|
||||
github.com/goharbor/harbor/src/pkg/member:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/member
|
||||
filename: fake_member_manager.go
|
||||
github.com/goharbor/harbor/src/pkg/usergroup:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/usergroup
|
||||
filename: fake_usergroup_manager.go
|
||||
github.com/goharbor/harbor/src/pkg/jobmonitor:
|
||||
config:
|
||||
dir: testing/pkg/jobmonitor
|
||||
interfaces:
|
||||
PoolManager:
|
||||
JobServiceMonitorClient:
|
||||
WorkerManager:
|
||||
QueueManager:
|
||||
RedisClient:
|
||||
github.com/goharbor/harbor/src/pkg/queuestatus:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/queuestatus
|
||||
github.com/goharbor/harbor/src/pkg/securityhub:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/securityhub
|
||||
github.com/goharbor/harbor/src/pkg/tag:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/tag
|
||||
github.com/goharbor/harbor/src/pkg/p2p/preheat/policy:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/p2p/preheat/policy
|
||||
github.com/goharbor/harbor/src/pkg/p2p/preheat/instance:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/p2p/preheat/instance
|
||||
github.com/goharbor/harbor/src/pkg/chart:
|
||||
interfaces:
|
||||
Operator:
|
||||
config:
|
||||
dir: testing/pkg/chart
|
||||
# registryctl related mocks
|
||||
github.com/goharbor/harbor/src/registryctl/client:
|
||||
interfaces:
|
||||
Client:
|
||||
config:
|
||||
dir: testing/registryctl
|
||||
outpkg: registryctl
|
||||
# remote interfaces
|
||||
github.com/docker/distribution:
|
||||
interfaces:
|
||||
Manifest:
|
||||
config:
|
||||
dir: testing/pkg/distribution
|
@ -1,6 +0,0 @@
|
||||
# Migrate Patch
|
||||
This is a simple program to fix the breakage that was introduced by migrate in notary.
|
||||
## Usage
|
||||
```sh
|
||||
patch -database <db_url>
|
||||
```
|
@ -1,88 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"flag"
|
||||
"log"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
_ "github.com/jackc/pgx/v4/stdlib" // registry pgx driver
|
||||
)
|
||||
|
||||
var dbURL string
|
||||
|
||||
const pgSQLAlterStmt string = `ALTER TABLE schema_migrations ADD COLUMN "dirty" boolean NOT NULL DEFAULT false`
|
||||
const pgSQLCheckColStmt string = `SELECT T1.C1, T2.C2 FROM
|
||||
(SELECT COUNT(*) AS C1 FROM information_schema.tables WHERE table_name='schema_migrations') T1,
|
||||
(SELECT COUNT(*) AS C2 FROM information_schema.columns WHERE table_name='schema_migrations' and column_name='dirty') T2`
|
||||
const pgSQLDelRows string = `DELETE FROM schema_migrations t WHERE t.version < ( SELECT MAX(version) FROM schema_migrations )`
|
||||
|
||||
func init() {
|
||||
urlUsage := `The URL to the target database (driver://url). Currently it only supports postgres`
|
||||
flag.StringVar(&dbURL, "database", "", urlUsage)
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
log.Printf("Updating database.")
|
||||
if !strings.HasPrefix(dbURL, "postgres://") {
|
||||
log.Fatalf("Invalid URL: '%s'\n", dbURL)
|
||||
}
|
||||
db, err := sql.Open("pgx", dbURL)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to connect to Database, error: %v\n", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
c := make(chan struct{})
|
||||
go func() {
|
||||
defer close(c)
|
||||
|
||||
err := db.Ping()
|
||||
for ; err != nil; err = db.Ping() {
|
||||
log.Println("Failed to Ping DB, sleep for 1 second.")
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
}()
|
||||
select {
|
||||
case <-c:
|
||||
case <-time.After(30 * time.Second):
|
||||
log.Fatal("Failed to connect DB after 30 seconds, time out. \n")
|
||||
}
|
||||
|
||||
row := db.QueryRow(pgSQLCheckColStmt)
|
||||
var tblCount, colCount int
|
||||
if err := row.Scan(&tblCount, &colCount); err != nil {
|
||||
log.Fatalf("Failed to check schema_migrations table, error: %v \n", err)
|
||||
}
|
||||
if tblCount == 0 {
|
||||
log.Println("schema_migrations table does not exist, skip.")
|
||||
return
|
||||
}
|
||||
if colCount > 0 {
|
||||
log.Println("schema_migrations table does not require update, skip.")
|
||||
return
|
||||
}
|
||||
if _, err := db.Exec(pgSQLDelRows); err != nil {
|
||||
log.Fatalf("Failed to clean up table, error: %v", err)
|
||||
}
|
||||
if _, err := db.Exec(pgSQLAlterStmt); err != nil {
|
||||
log.Fatalf("Failed to update database, error: %v \n", err)
|
||||
}
|
||||
log.Println("Done updating database.")
|
||||
}
|
@ -116,9 +116,9 @@ func (b *BaseAPI) DecodeJSONReqAndValidate(v interface{}) (bool, error) {
|
||||
}
|
||||
|
||||
// Redirect does redirection to resource URI with http header status code.
|
||||
func (b *BaseAPI) Redirect(statusCode int, resouceID string) {
|
||||
func (b *BaseAPI) Redirect(statusCode int, resourceID string) {
|
||||
requestURI := b.Ctx.Request.RequestURI
|
||||
resourceURI := requestURI + "/" + resouceID
|
||||
resourceURI := requestURI + "/" + resourceID
|
||||
|
||||
b.Ctx.Redirect(statusCode, resourceURI)
|
||||
}
|
||||
@ -138,7 +138,7 @@ func (b *BaseAPI) GetIDFromURL() (int64, error) {
|
||||
return id, nil
|
||||
}
|
||||
|
||||
// SetPaginationHeader set"Link" and "X-Total-Count" header for pagination request
|
||||
// SetPaginationHeader set "Link" and "X-Total-Count" header for pagination request
|
||||
func (b *BaseAPI) SetPaginationHeader(total, page, pageSize int64) {
|
||||
b.Ctx.ResponseWriter.Header().Set("X-Total-Count", strconv.FormatInt(total, 10))
|
||||
|
||||
|
@ -134,6 +134,7 @@ const (
|
||||
OIDCGroupType = 3
|
||||
LDAPGroupAdminDn = "ldap_group_admin_dn"
|
||||
LDAPGroupMembershipAttribute = "ldap_group_membership_attribute"
|
||||
LDAPGroupAttachParallel = "ldap_group_attach_parallel"
|
||||
DefaultRegistryControllerEndpoint = "http://registryctl:8080"
|
||||
DefaultPortalURL = "http://portal:8080"
|
||||
DefaultRegistryCtlURL = "http://registryctl:8080"
|
||||
@ -151,7 +152,7 @@ const (
|
||||
OIDCCallbackPath = "/c/oidc/callback"
|
||||
OIDCLoginPath = "/c/oidc/login"
|
||||
|
||||
AuthProxyRediretPath = "/c/authproxy/redirect"
|
||||
AuthProxyRedirectPath = "/c/authproxy/redirect"
|
||||
|
||||
// Global notification enable configuration
|
||||
NotificationEnable = "notification_enable"
|
||||
|
@ -48,7 +48,7 @@ func GetInternalCertPair() (tls.Certificate, error) {
|
||||
|
||||
// GetInternalTLSConfig return a tls.Config for internal https communicate
|
||||
func GetInternalTLSConfig() (*tls.Config, error) {
|
||||
// genrate key pair
|
||||
// generate key pair
|
||||
cert, err := GetInternalCertPair()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("internal TLS enabled but can't get cert file %w", err)
|
||||
|
@ -151,7 +151,7 @@ func (d *DefaultClient) SubmitJob(jd *models.JobData) (string, error) {
|
||||
return stats.Stats.JobID, nil
|
||||
}
|
||||
|
||||
// GetJobLog call jobserivce API to get the log of a job. It only accepts the UUID of the job
|
||||
// GetJobLog call jobservice API to get the log of a job. It only accepts the UUID of the job
|
||||
func (d *DefaultClient) GetJobLog(uuid string) ([]byte, error) {
|
||||
url := d.endpoint + "/api/v1/jobs/" + uuid + "/log"
|
||||
req, err := http.NewRequest(http.MethodGet, url, nil)
|
||||
|
@ -62,7 +62,7 @@ type StatsInfo struct {
|
||||
UpstreamJobID string `json:"upstream_job_id,omitempty"` // Ref the upstream job if existing
|
||||
NumericPID int64 `json:"numeric_policy_id,omitempty"` // The numeric policy ID of the periodic job
|
||||
Parameters Parameters `json:"parameters,omitempty"`
|
||||
Revision int64 `json:"revision,omitempty"` // For differentiating the each retry of the same job
|
||||
Revision int64 `json:"revision,omitempty"` // For differentiating each retry of the same job
|
||||
}
|
||||
|
||||
// JobPoolStats represents the healthy and status of all the running worker pools.
|
||||
@ -70,7 +70,7 @@ type JobPoolStats struct {
|
||||
Pools []*JobPoolStatsData `json:"worker_pools"`
|
||||
}
|
||||
|
||||
// JobPoolStatsData represent the healthy and status of the worker worker.
|
||||
// JobPoolStatsData represent the healthy and status of the worker.
|
||||
type JobPoolStatsData struct {
|
||||
WorkerPoolID string `json:"worker_pool_id"`
|
||||
StartedAt int64 `json:"started_at"`
|
||||
|
@ -29,7 +29,7 @@ const (
|
||||
JobCanceled string = "canceled"
|
||||
// JobRetrying indicate the job needs to be retried, it will be scheduled to the end of job queue by statemachine after an interval.
|
||||
JobRetrying string = "retrying"
|
||||
// JobContinue is the status returned by statehandler to tell statemachine to move to next possible state based on trasition table.
|
||||
// JobContinue is the status returned by statehandler to tell statemachine to move to next possible state based on transition table.
|
||||
JobContinue string = "_continue"
|
||||
// JobScheduled ...
|
||||
JobScheduled string = "scheduled"
|
||||
|
@ -14,7 +14,7 @@
|
||||
|
||||
package models
|
||||
|
||||
// UAASettings wraps the configuraations to access UAA service
|
||||
// UAASettings wraps the configurations to access UAA service
|
||||
type UAASettings struct {
|
||||
Endpoint string
|
||||
ClientID string
|
||||
|
@ -14,7 +14,9 @@
|
||||
|
||||
package rbac
|
||||
|
||||
import "github.com/goharbor/harbor/src/pkg/permission/types"
|
||||
import (
|
||||
"github.com/goharbor/harbor/src/pkg/permission/types"
|
||||
)
|
||||
|
||||
// const action variables
|
||||
const (
|
||||
@ -81,9 +83,86 @@ const (
|
||||
ResourceSecurityHub = Resource("security-hub")
|
||||
)
|
||||
|
||||
type scope string
|
||||
|
||||
const (
|
||||
ScopeSystem = scope("System")
|
||||
ScopeProject = scope("Project")
|
||||
)
|
||||
|
||||
// RobotPermissionProvider defines the permission provider for robot account
|
||||
type RobotPermissionProvider interface {
|
||||
GetPermissions(s scope) []*types.Policy
|
||||
}
|
||||
|
||||
// GetPermissionProvider gives the robot permission provider
|
||||
func GetPermissionProvider() RobotPermissionProvider {
|
||||
// TODO will determine by the ui configuration
|
||||
return &NolimitProvider{}
|
||||
}
|
||||
|
||||
// BaseProvider ...
|
||||
type BaseProvider struct {
|
||||
}
|
||||
|
||||
// GetPermissions ...
|
||||
func (d *BaseProvider) GetPermissions(s scope) []*types.Policy {
|
||||
return PoliciesMap[s]
|
||||
}
|
||||
|
||||
// NolimitProvider ...
|
||||
type NolimitProvider struct {
|
||||
BaseProvider
|
||||
}
|
||||
|
||||
// GetPermissions ...
|
||||
func (n *NolimitProvider) GetPermissions(s scope) []*types.Policy {
|
||||
if s == ScopeSystem {
|
||||
return append(n.BaseProvider.GetPermissions(ScopeSystem),
|
||||
&types.Policy{Resource: ResourceRobot, Action: ActionCreate},
|
||||
&types.Policy{Resource: ResourceRobot, Action: ActionRead},
|
||||
&types.Policy{Resource: ResourceRobot, Action: ActionList},
|
||||
&types.Policy{Resource: ResourceRobot, Action: ActionDelete},
|
||||
|
||||
&types.Policy{Resource: ResourceUser, Action: ActionCreate},
|
||||
&types.Policy{Resource: ResourceUser, Action: ActionRead},
|
||||
&types.Policy{Resource: ResourceUser, Action: ActionUpdate},
|
||||
&types.Policy{Resource: ResourceUser, Action: ActionList},
|
||||
&types.Policy{Resource: ResourceUser, Action: ActionDelete},
|
||||
|
||||
&types.Policy{Resource: ResourceLdapUser, Action: ActionCreate},
|
||||
&types.Policy{Resource: ResourceLdapUser, Action: ActionList},
|
||||
|
||||
&types.Policy{Resource: ResourceExportCVE, Action: ActionCreate},
|
||||
&types.Policy{Resource: ResourceExportCVE, Action: ActionRead},
|
||||
|
||||
&types.Policy{Resource: ResourceQuota, Action: ActionUpdate},
|
||||
|
||||
&types.Policy{Resource: ResourceUserGroup, Action: ActionCreate},
|
||||
&types.Policy{Resource: ResourceUserGroup, Action: ActionRead},
|
||||
&types.Policy{Resource: ResourceUserGroup, Action: ActionUpdate},
|
||||
&types.Policy{Resource: ResourceUserGroup, Action: ActionList},
|
||||
&types.Policy{Resource: ResourceUserGroup, Action: ActionDelete})
|
||||
}
|
||||
if s == ScopeProject {
|
||||
return append(n.BaseProvider.GetPermissions(ScopeProject),
|
||||
&types.Policy{Resource: ResourceRobot, Action: ActionCreate},
|
||||
&types.Policy{Resource: ResourceRobot, Action: ActionRead},
|
||||
&types.Policy{Resource: ResourceRobot, Action: ActionList},
|
||||
&types.Policy{Resource: ResourceRobot, Action: ActionDelete},
|
||||
|
||||
&types.Policy{Resource: ResourceMember, Action: ActionCreate},
|
||||
&types.Policy{Resource: ResourceMember, Action: ActionRead},
|
||||
&types.Policy{Resource: ResourceMember, Action: ActionUpdate},
|
||||
&types.Policy{Resource: ResourceMember, Action: ActionList},
|
||||
&types.Policy{Resource: ResourceMember, Action: ActionDelete})
|
||||
}
|
||||
return []*types.Policy{}
|
||||
}
|
||||
|
||||
var (
|
||||
PoliciesMap = map[string][]*types.Policy{
|
||||
"System": {
|
||||
PoliciesMap = map[scope][]*types.Policy{
|
||||
ScopeSystem: {
|
||||
{Resource: ResourceAuditLog, Action: ActionList},
|
||||
|
||||
{Resource: ResourcePreatInstance, Action: ActionRead},
|
||||
@ -154,7 +233,7 @@ var (
|
||||
{Resource: ResourceQuota, Action: ActionRead},
|
||||
{Resource: ResourceQuota, Action: ActionList},
|
||||
},
|
||||
"Project": {
|
||||
ScopeProject: {
|
||||
{Resource: ResourceLog, Action: ActionList},
|
||||
|
||||
{Resource: ResourceProject, Action: ActionRead},
|
||||
|
36
src/common/rbac/const_test.go
Normal file
36
src/common/rbac/const_test.go
Normal file
@ -0,0 +1,36 @@
|
||||
package rbac
|
||||
|
||||
import (
|
||||
_ "github.com/goharbor/harbor/src/pkg/config/inmemory"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestBaseProvider(t *testing.T) {
|
||||
permissionProvider := &BaseProvider{}
|
||||
sysPermissions := permissionProvider.GetPermissions(ScopeSystem)
|
||||
|
||||
for _, per := range sysPermissions {
|
||||
if per.Action == ActionCreate && per.Resource == ResourceRobot {
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNolimitProvider(t *testing.T) {
|
||||
permissionProvider := &BaseProvider{}
|
||||
sysPermissions := permissionProvider.GetPermissions(ScopeSystem)
|
||||
|
||||
for _, per := range sysPermissions {
|
||||
if per.Action == ActionCreate && per.Resource == ResourceRobot {
|
||||
t.Log("no limit provider has the permission of robot account creation")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetPermissionProvider(t *testing.T) {
|
||||
defaultPro := GetPermissionProvider()
|
||||
_, ok := defaultPro.(*NolimitProvider)
|
||||
assert.True(t, ok)
|
||||
}
|
@ -58,7 +58,7 @@ func NewBuilderForUser(user *models.User, ctl project.Controller) RBACUserBuilde
|
||||
// NewBuilderForPolicies create a builder for the policies
|
||||
func NewBuilderForPolicies(username string, policies []*types.Policy,
|
||||
filters ...func(*proModels.Project, []*types.Policy) []*types.Policy) RBACUserBuilder {
|
||||
return func(ctx context.Context, p *proModels.Project) types.RBACUser {
|
||||
return func(_ context.Context, p *proModels.Project) types.RBACUser {
|
||||
for _, filter := range filters {
|
||||
policies = filter(p, policies)
|
||||
}
|
||||
|
@ -87,8 +87,8 @@ func TestProjectRoleAccess(t *testing.T) {
|
||||
Username: "username",
|
||||
}
|
||||
evaluator := NewEvaluator(ctl, NewBuilderForUser(user, ctl))
|
||||
resorce := NewNamespace(public.ProjectID).Resource(rbac.ResourceRepository)
|
||||
assert.True(evaluator.HasPermission(context.TODO(), resorce, rbac.ActionPush))
|
||||
resource := NewNamespace(public.ProjectID).Resource(rbac.ResourceRepository)
|
||||
assert.True(evaluator.HasPermission(context.TODO(), resource, rbac.ActionPush))
|
||||
}
|
||||
|
||||
{
|
||||
@ -101,8 +101,8 @@ func TestProjectRoleAccess(t *testing.T) {
|
||||
Username: "username",
|
||||
}
|
||||
evaluator := NewEvaluator(ctl, NewBuilderForUser(user, ctl))
|
||||
resorce := NewNamespace(public.ProjectID).Resource(rbac.ResourceRepository)
|
||||
assert.False(evaluator.HasPermission(context.TODO(), resorce, rbac.ActionPush))
|
||||
resource := NewNamespace(public.ProjectID).Resource(rbac.ResourceRepository)
|
||||
assert.False(evaluator.HasPermission(context.TODO(), resource, rbac.ActionPush))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -25,7 +25,7 @@ import (
|
||||
|
||||
// NewEvaluator create evaluator for the system
|
||||
func NewEvaluator(username string, policies []*types.Policy) evaluator.Evaluator {
|
||||
return namespace.New(NamespaceKind, func(ctx context.Context, ns types.Namespace) evaluator.Evaluator {
|
||||
return namespace.New(NamespaceKind, func(_ context.Context, _ types.Namespace) evaluator.Evaluator {
|
||||
return rbac.New(&rbacUser{
|
||||
username: username,
|
||||
policies: policies,
|
||||
|
@ -25,7 +25,7 @@ import (
|
||||
const HeaderPrefix = "Harbor-Secret "
|
||||
|
||||
// FromRequest tries to get Harbor Secret from request header.
|
||||
// It will return empty string if the reqeust is nil.
|
||||
// It will return empty string if the request is nil.
|
||||
func FromRequest(req *http.Request) string {
|
||||
if req == nil {
|
||||
return ""
|
||||
|
@ -70,7 +70,7 @@ func Send(addr, identity, username, password string,
|
||||
|
||||
// Ping tests the connection and authentication with email server
|
||||
// If tls is true, a secure connection is established, or Ping
|
||||
// trys to upgrate the insecure connection to a secure one if
|
||||
// trys to upgrade the insecure connection to a secure one if
|
||||
// email server supports it.
|
||||
// Ping doesn't verify the server's certificate and hostname when
|
||||
// needed if the parameter insecure is ture
|
||||
@ -119,7 +119,7 @@ func newClient(addr, identity, username, password string,
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// try to swith to SSL/TLS
|
||||
// try to switch to SSL/TLS
|
||||
if !tls {
|
||||
if ok, _ := client.Extension("STARTTLS"); ok {
|
||||
log.Debugf("switching the connection with %s to SSL/TLS ...", addr)
|
||||
|
@ -38,7 +38,7 @@ func TestSend(t *testing.T) {
|
||||
err := Send(addr, identity, username, password,
|
||||
timeout, tls, insecure, from, to,
|
||||
subject, message)
|
||||
// bypass the check due to securty policy change on gmail
|
||||
// bypass the check due to security policy change on gmail
|
||||
// TODO
|
||||
// assert.Nil(t, err)
|
||||
|
||||
@ -78,7 +78,7 @@ func TestPing(t *testing.T) {
|
||||
// tls connection
|
||||
err := Ping(addr, identity, username, password,
|
||||
timeout, tls, insecure)
|
||||
// bypass the check due to securty policy change on gmail
|
||||
// bypass the check due to security policy change on gmail
|
||||
// TODO
|
||||
// assert.Nil(t, err)
|
||||
|
||||
|
@ -46,8 +46,8 @@ var HashAlg = map[string]func() hash.Hash{
|
||||
}
|
||||
|
||||
// Encrypt encrypts the content with salt
|
||||
func Encrypt(content string, salt string, encrptAlg string) string {
|
||||
return fmt.Sprintf("%x", pbkdf2.Key([]byte(content), []byte(salt), 4096, 16, HashAlg[encrptAlg]))
|
||||
func Encrypt(content string, salt string, encryptAlg string) string {
|
||||
return fmt.Sprintf("%x", pbkdf2.Key([]byte(content), []byte(salt), 4096, 16, HashAlg[encryptAlg]))
|
||||
}
|
||||
|
||||
// ReversibleEncrypt encrypts the str with aes/base64
|
||||
|
@ -72,7 +72,7 @@ func (p *passportsPool) Revoke() bool {
|
||||
type LimitedConcurrentRunner interface {
|
||||
// AddTask adds a task to run
|
||||
AddTask(task func() error)
|
||||
// Wait waits all the tasks to be finished, returns error if the any of the tasks gets error
|
||||
// Wait waits all the tasks to be finished, returns error if any of the tasks gets error
|
||||
Wait() (err error)
|
||||
// Cancel cancels all tasks, tasks that already started will continue to run
|
||||
Cancel(err error)
|
||||
@ -106,7 +106,7 @@ func (r *limitedConcurrentRunner) AddTask(task func() error) {
|
||||
r.wg.Done()
|
||||
}()
|
||||
|
||||
// Return false means no passport acquired, and no valid passport will be dispatched any more.
|
||||
// Return false means no passport acquired, and no valid passport will be dispatched anymore.
|
||||
// For example, some crucial errors happened and all tasks should be cancelled.
|
||||
if ok := r.passportsPool.Apply(); !ok {
|
||||
return
|
||||
|
@ -65,7 +65,7 @@ var defaultConfig = map[string]interface{}{
|
||||
common.RobotNamePrefix: "robot$",
|
||||
}
|
||||
|
||||
// GetDefaultConfigMap returns the defailt config map for easier modification.
|
||||
// GetDefaultConfigMap returns the default config map for easier modification.
|
||||
func GetDefaultConfigMap() map[string]interface{} {
|
||||
return defaultConfig
|
||||
}
|
||||
|
@ -55,11 +55,11 @@ type Response struct {
|
||||
StatusCode int
|
||||
// Headers are the headers of the response
|
||||
Headers map[string]string
|
||||
// Boby is the body of the response
|
||||
// Body is the body of the response
|
||||
Body []byte
|
||||
}
|
||||
|
||||
// Handler returns a handler function which handle requst according to
|
||||
// Handler returns a handler function which handle request according to
|
||||
// the response provided
|
||||
func Handler(resp *Response) func(http.ResponseWriter, *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
@ -82,7 +82,7 @@ func Handler(resp *Response) func(http.ResponseWriter, *http.Request) {
|
||||
}
|
||||
}
|
||||
|
||||
// NewServer creates a HTTP server for unit test
|
||||
// NewServer creates an HTTP server for unit test
|
||||
func NewServer(mappings ...*RequestHandlerMapping) *httptest.Server {
|
||||
r := mux.NewRouter()
|
||||
|
||||
|
@ -89,7 +89,7 @@ func GenerateRandomString() string {
|
||||
// TestTCPConn tests TCP connection
|
||||
// timeout: the total time before returning if something is wrong
|
||||
// with the connection, in second
|
||||
// interval: the interval time for retring after failure, in second
|
||||
// interval: the interval time for retrying after failure, in second
|
||||
func TestTCPConn(addr string, timeout, interval int) error {
|
||||
success := make(chan int, 1)
|
||||
cancel := make(chan int, 1)
|
||||
@ -176,7 +176,7 @@ func ParseProjectIDOrName(value interface{}) (int64, string, error) {
|
||||
return id, name, nil
|
||||
}
|
||||
|
||||
// SafeCastString -- cast a object to string saftely
|
||||
// SafeCastString -- cast an object to string safely
|
||||
func SafeCastString(value interface{}) string {
|
||||
if result, ok := value.(string); ok {
|
||||
return result
|
||||
|
@ -106,7 +106,7 @@ func parseV1alpha1Icon(artifact *artifact.Artifact, manifest *v1.Manifest, reg r
|
||||
switch contentType {
|
||||
case GIF, PNG, JPEG:
|
||||
default:
|
||||
return errors.New(nil).WithCode(errors.BadRequestCode).WithMessage("unsupported content type: %s", contentType)
|
||||
return errors.New(nil).WithCode(errors.BadRequestCode).WithMessagef("unsupported content type: %s", contentType)
|
||||
}
|
||||
artifact.Icon = iconDigest
|
||||
return nil
|
||||
|
@ -118,6 +118,8 @@ type Controller interface {
|
||||
Walk(ctx context.Context, root *Artifact, walkFn func(*Artifact) error, option *Option) error
|
||||
// HasUnscannableLayer check artifact with digest if has unscannable layer
|
||||
HasUnscannableLayer(ctx context.Context, dgst string) (bool, error)
|
||||
// ListWithLatest list the artifacts when the latest_in_repository in the query was set
|
||||
ListWithLatest(ctx context.Context, query *q.Query, option *Option) (artifacts []*Artifact, err error)
|
||||
}
|
||||
|
||||
// NewController creates an instance of the default artifact controller
|
||||
@ -171,16 +173,18 @@ func (c *controller) Ensure(ctx context.Context, repository, digest string, opti
|
||||
}
|
||||
}
|
||||
}
|
||||
// fire event
|
||||
e := &metadata.PushArtifactEventMetadata{
|
||||
Ctx: ctx,
|
||||
Artifact: artifact,
|
||||
}
|
||||
if created {
|
||||
// fire event for create
|
||||
e := &metadata.PushArtifactEventMetadata{
|
||||
Ctx: ctx,
|
||||
Artifact: artifact,
|
||||
}
|
||||
|
||||
if option != nil && len(option.Tags) > 0 {
|
||||
e.Tag = option.Tags[0]
|
||||
if option != nil && len(option.Tags) > 0 {
|
||||
e.Tag = option.Tags[0]
|
||||
}
|
||||
notification.AddEvent(ctx, e)
|
||||
}
|
||||
notification.AddEvent(ctx, e)
|
||||
return created, artifact.ID, nil
|
||||
}
|
||||
|
||||
@ -303,7 +307,7 @@ func (c *controller) getByTag(ctx context.Context, repository, tag string, optio
|
||||
}
|
||||
if len(tags) == 0 {
|
||||
return nil, errors.New(nil).WithCode(errors.NotFoundCode).
|
||||
WithMessage("artifact %s:%s not found", repository, tag)
|
||||
WithMessagef("artifact %s:%s not found", repository, tag)
|
||||
}
|
||||
return c.Get(ctx, tags[0].ArtifactID, option)
|
||||
}
|
||||
@ -782,3 +786,16 @@ func (c *controller) HasUnscannableLayer(ctx context.Context, dgst string) (bool
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// ListWithLatest ...
|
||||
func (c *controller) ListWithLatest(ctx context.Context, query *q.Query, option *Option) (artifacts []*Artifact, err error) {
|
||||
arts, err := c.artMgr.ListWithLatest(ctx, query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var res []*Artifact
|
||||
for _, art := range arts {
|
||||
res = append(res, c.assembleArtifact(ctx, art, option))
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
@ -67,7 +67,7 @@ type controllerTestSuite struct {
|
||||
ctl *controller
|
||||
repoMgr *repotesting.Manager
|
||||
artMgr *arttesting.Manager
|
||||
artrashMgr *artrashtesting.FakeManager
|
||||
artrashMgr *artrashtesting.Manager
|
||||
blobMgr *blob.Manager
|
||||
tagCtl *tagtesting.FakeController
|
||||
labelMgr *label.Manager
|
||||
@ -80,7 +80,7 @@ type controllerTestSuite struct {
|
||||
func (c *controllerTestSuite) SetupTest() {
|
||||
c.repoMgr = &repotesting.Manager{}
|
||||
c.artMgr = &arttesting.Manager{}
|
||||
c.artrashMgr = &artrashtesting.FakeManager{}
|
||||
c.artrashMgr = &artrashtesting.Manager{}
|
||||
c.blobMgr = &blob.Manager{}
|
||||
c.tagCtl = &tagtesting.FakeController{}
|
||||
c.labelMgr = &label.Manager{}
|
||||
@ -323,6 +323,44 @@ func (c *controllerTestSuite) TestList() {
|
||||
c.Equal(0, len(artifacts[0].Accessories))
|
||||
}
|
||||
|
||||
func (c *controllerTestSuite) TestListWithLatest() {
|
||||
query := &q.Query{}
|
||||
option := &Option{
|
||||
WithTag: true,
|
||||
WithAccessory: true,
|
||||
}
|
||||
c.artMgr.On("ListWithLatest", mock.Anything, mock.Anything).Return([]*artifact.Artifact{
|
||||
{
|
||||
ID: 1,
|
||||
RepositoryID: 1,
|
||||
},
|
||||
}, nil)
|
||||
c.tagCtl.On("List").Return([]*tag.Tag{
|
||||
{
|
||||
Tag: model_tag.Tag{
|
||||
ID: 1,
|
||||
RepositoryID: 1,
|
||||
ArtifactID: 1,
|
||||
Name: "latest",
|
||||
},
|
||||
},
|
||||
}, nil)
|
||||
c.repoMgr.On("Get", mock.Anything, mock.Anything).Return(&repomodel.RepoRecord{
|
||||
Name: "library/hello-world",
|
||||
}, nil)
|
||||
c.repoMgr.On("List", mock.Anything, mock.Anything).Return([]*repomodel.RepoRecord{
|
||||
{RepositoryID: 1, Name: "library/hello-world"},
|
||||
}, nil)
|
||||
c.accMgr.On("List", mock.Anything, mock.Anything).Return([]accessorymodel.Accessory{}, nil)
|
||||
artifacts, err := c.ctl.ListWithLatest(nil, query, option)
|
||||
c.Require().Nil(err)
|
||||
c.Require().Len(artifacts, 1)
|
||||
c.Equal(int64(1), artifacts[0].ID)
|
||||
c.Require().Len(artifacts[0].Tags, 1)
|
||||
c.Equal(int64(1), artifacts[0].Tags[0].ID)
|
||||
c.Equal(0, len(artifacts[0].Accessories))
|
||||
}
|
||||
|
||||
func (c *controllerTestSuite) TestGet() {
|
||||
c.artMgr.On("Get", mock.Anything, mock.Anything, mock.Anything).Return(&artifact.Artifact{
|
||||
ID: 1,
|
||||
@ -476,7 +514,7 @@ func (c *controllerTestSuite) TestDeleteDeeply() {
|
||||
},
|
||||
}, nil)
|
||||
c.repoMgr.On("Get", mock.Anything, mock.Anything).Return(&repomodel.RepoRecord{}, nil)
|
||||
c.artrashMgr.On("Create").Return(0, nil)
|
||||
c.artrashMgr.On("Create", mock.Anything, mock.Anything).Return(int64(0), nil)
|
||||
c.accMgr.On("List", mock.Anything, mock.Anything).Return([]accessorymodel.Accessory{}, nil)
|
||||
err = c.ctl.deleteDeeply(orm.NewContext(nil, &ormtesting.FakeOrmer{}), 1, false, false)
|
||||
c.Require().Nil(err)
|
||||
@ -534,7 +572,7 @@ func (c *controllerTestSuite) TestDeleteDeeply() {
|
||||
c.blobMgr.On("List", mock.Anything, mock.Anything).Return(nil, nil)
|
||||
c.blobMgr.On("CleanupAssociationsForProject", mock.Anything, mock.Anything, mock.Anything).Return(nil)
|
||||
c.repoMgr.On("Get", mock.Anything, mock.Anything).Return(&repomodel.RepoRecord{}, nil)
|
||||
c.artrashMgr.On("Create").Return(0, nil)
|
||||
c.artrashMgr.On("Create", mock.Anything, mock.Anything).Return(int64(0), nil)
|
||||
err = c.ctl.deleteDeeply(orm.NewContext(nil, &ormtesting.FakeOrmer{}), 1, true, true)
|
||||
c.Require().Nil(err)
|
||||
|
||||
|
@ -102,8 +102,9 @@ type AdditionLink struct {
|
||||
|
||||
// Option is used to specify the properties returned when listing/getting artifacts
|
||||
type Option struct {
|
||||
WithTag bool
|
||||
TagOption *tag.Option // only works when WithTag is set to true
|
||||
WithLabel bool
|
||||
WithAccessory bool
|
||||
WithTag bool
|
||||
TagOption *tag.Option // only works when WithTag is set to true
|
||||
WithLabel bool
|
||||
WithAccessory bool
|
||||
LatestInRepository bool
|
||||
}
|
||||
|
@ -44,7 +44,7 @@ func (m *IndexProcessor) AbstractMetadata(_ context.Context, _ *artifact.Artifac
|
||||
// AbstractAddition abstracts the addition of artifact
|
||||
func (m *IndexProcessor) AbstractAddition(_ context.Context, _ *artifact.Artifact, addition string) (*processor.Addition, error) {
|
||||
return nil, errors.New(nil).WithCode(errors.BadRequestCode).
|
||||
WithMessage("addition %s isn't supported", addition)
|
||||
WithMessagef("addition %s isn't supported", addition)
|
||||
}
|
||||
|
||||
// GetArtifactType returns the artifact type
|
||||
|
@ -66,7 +66,7 @@ func (m *ManifestProcessor) AbstractMetadata(ctx context.Context, artifact *arti
|
||||
// AbstractAddition abstracts the addition of artifact
|
||||
func (m *ManifestProcessor) AbstractAddition(_ context.Context, _ *artifact.Artifact, addition string) (*processor.Addition, error) {
|
||||
return nil, errors.New(nil).WithCode(errors.BadRequestCode).
|
||||
WithMessage("addition %s isn't supported", addition)
|
||||
WithMessagef("addition %s isn't supported", addition)
|
||||
}
|
||||
|
||||
// GetArtifactType returns the artifact type
|
||||
|
@ -61,7 +61,7 @@ type processor struct {
|
||||
func (p *processor) AbstractAddition(_ context.Context, artifact *artifact.Artifact, addition string) (*ps.Addition, error) {
|
||||
if addition != AdditionTypeValues && addition != AdditionTypeReadme && addition != AdditionTypeDependencies {
|
||||
return nil, errors.New(nil).WithCode(errors.BadRequestCode).
|
||||
WithMessage("addition %s isn't supported for %s", addition, ArtifactTypeChart)
|
||||
WithMessagef("addition %s isn't supported for %s", addition, ArtifactTypeChart)
|
||||
}
|
||||
|
||||
m, _, err := p.RegCli.PullManifest(artifact.RepositoryName, artifact.Digest)
|
||||
|
@ -64,12 +64,12 @@ type processorTestSuite struct {
|
||||
suite.Suite
|
||||
processor *processor
|
||||
regCli *registry.Client
|
||||
chartOptr *chart.FakeOpertaor
|
||||
chartOptr *chart.Operator
|
||||
}
|
||||
|
||||
func (p *processorTestSuite) SetupTest() {
|
||||
p.regCli = ®istry.Client{}
|
||||
p.chartOptr = &chart.FakeOpertaor{}
|
||||
p.chartOptr = &chart.Operator{}
|
||||
p.processor = &processor{
|
||||
chartOperator: p.chartOptr,
|
||||
}
|
||||
@ -106,7 +106,7 @@ func (p *processorTestSuite) TestAbstractAddition() {
|
||||
p.Require().Nil(err)
|
||||
p.regCli.On("PullManifest", mock.Anything, mock.Anything).Return(manifest, "", nil)
|
||||
p.regCli.On("PullBlob", mock.Anything, mock.Anything).Return(int64(0), io.NopCloser(strings.NewReader(chartYaml)), nil)
|
||||
p.chartOptr.On("GetDetails").Return(chartDetails, nil)
|
||||
p.chartOptr.On("GetDetails", mock.Anything).Return(chartDetails, nil)
|
||||
|
||||
// values.yaml
|
||||
addition, err := p.processor.AbstractAddition(nil, artifact, AdditionTypeValues)
|
||||
|
@ -132,5 +132,5 @@ func (d *defaultProcessor) AbstractAddition(_ context.Context, artifact *artifac
|
||||
// It will be support in the future.
|
||||
// return error directly
|
||||
return nil, errors.New(nil).WithCode(errors.BadRequestCode).
|
||||
WithMessage("the processor for artifact %s not found, cannot get the addition", artifact.Type)
|
||||
WithMessagef("the processor for artifact %s not found, cannot get the addition", artifact.Type)
|
||||
}
|
||||
|
@ -52,7 +52,7 @@ func (m *manifestV1Processor) AbstractMetadata(_ context.Context, artifact *arti
|
||||
|
||||
func (m *manifestV1Processor) AbstractAddition(_ context.Context, _ *artifact.Artifact, addition string) (*processor.Addition, error) {
|
||||
return nil, errors.New(nil).WithCode(errors.BadRequestCode).
|
||||
WithMessage("addition %s isn't supported for %s(manifest version 1)", addition, ArtifactTypeImage)
|
||||
WithMessagef("addition %s isn't supported for %s(manifest version 1)", addition, ArtifactTypeImage)
|
||||
}
|
||||
|
||||
func (m *manifestV1Processor) GetArtifactType(_ context.Context, _ *artifact.Artifact) string {
|
||||
|
@ -87,7 +87,7 @@ func (m *manifestV2Processor) AbstractMetadata(ctx context.Context, artifact *ar
|
||||
func (m *manifestV2Processor) AbstractAddition(ctx context.Context, artifact *artifact.Artifact, addition string) (*processor.Addition, error) {
|
||||
if addition != AdditionTypeBuildHistory {
|
||||
return nil, errors.New(nil).WithCode(errors.BadRequestCode).
|
||||
WithMessage("addition %s isn't supported for %s(manifest version 2)", addition, ArtifactTypeImage)
|
||||
WithMessagef("addition %s isn't supported for %s(manifest version 2)", addition, ArtifactTypeImage)
|
||||
}
|
||||
|
||||
mani, _, err := m.RegCli.PullManifest(artifact.RepositoryName, artifact.Digest)
|
||||
|
@ -103,7 +103,7 @@ func (m *Processor) AbstractMetadata(ctx context.Context, art *artifact.Artifact
|
||||
func (m *Processor) AbstractAddition(ctx context.Context, artifact *artifact.Artifact, addition string) (*processor.Addition, error) {
|
||||
if addition != AdditionTypeBuildHistory {
|
||||
return nil, errors.New(nil).WithCode(errors.BadRequestCode).
|
||||
WithMessage("addition %s isn't supported for %s(manifest version 2)", addition, ArtifactTypeWASM)
|
||||
WithMessagef("addition %s isn't supported for %s(manifest version 2)", addition, ArtifactTypeWASM)
|
||||
}
|
||||
|
||||
mani, _, err := m.RegCli.PullManifest(artifact.RepositoryName, artifact.Digest)
|
||||
|
@ -248,7 +248,7 @@ func (c *controller) Get(ctx context.Context, digest string, options ...Option)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if len(blobs) == 0 {
|
||||
return nil, errors.NotFoundError(nil).WithMessage("blob %s not found", digest)
|
||||
return nil, errors.NotFoundError(nil).WithMessagef("blob %s not found", digest)
|
||||
}
|
||||
|
||||
return blobs[0], nil
|
||||
@ -363,7 +363,7 @@ func (c *controller) Touch(ctx context.Context, blob *blob.Blob) error {
|
||||
return err
|
||||
}
|
||||
if count == 0 {
|
||||
return errors.New(nil).WithMessage(fmt.Sprintf("no blob item is updated to StatusNone, id:%d, digest:%s", blob.ID, blob.Digest)).WithCode(errors.NotFoundCode)
|
||||
return errors.New(nil).WithMessagef("no blob item is updated to StatusNone, id:%d, digest:%s", blob.ID, blob.Digest).WithCode(errors.NotFoundCode)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -375,7 +375,7 @@ func (c *controller) Fail(ctx context.Context, blob *blob.Blob) error {
|
||||
return err
|
||||
}
|
||||
if count == 0 {
|
||||
return errors.New(nil).WithMessage(fmt.Sprintf("no blob item is updated to StatusDeleteFailed, id:%d, digest:%s", blob.ID, blob.Digest)).WithCode(errors.NotFoundCode)
|
||||
return errors.New(nil).WithMessagef("no blob item is updated to StatusDeleteFailed, id:%d, digest:%s", blob.ID, blob.Digest).WithCode(errors.NotFoundCode)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -182,11 +182,11 @@ func verifyValueLengthCfg(_ context.Context, cfgs map[string]interface{}) error
|
||||
// the cfgs is unmarshal from json string, the number type will be float64
|
||||
if vf, ok := v.(float64); ok {
|
||||
if vf <= 0 {
|
||||
return errors.BadRequestError(nil).WithMessage("the %s value must be positive", c)
|
||||
return errors.BadRequestError(nil).WithMessagef("the %s value must be positive", c)
|
||||
}
|
||||
|
||||
if int64(vf) > maxValue {
|
||||
return errors.BadRequestError(nil).WithMessage(fmt.Sprintf("the %s value is over the limit value: %d", c, maxValue))
|
||||
return errors.BadRequestError(nil).WithMessagef("the %s value is over the limit value: %d", c, maxValue)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -45,7 +45,8 @@ func (h *Handler) Handle(ctx context.Context, value interface{}) error {
|
||||
switch v := value.(type) {
|
||||
case *event.PushArtifactEvent, *event.DeleteArtifactEvent,
|
||||
*event.DeleteRepositoryEvent, *event.CreateProjectEvent, *event.DeleteProjectEvent,
|
||||
*event.DeleteTagEvent, *event.CreateTagEvent:
|
||||
*event.DeleteTagEvent, *event.CreateTagEvent,
|
||||
*event.CreateRobotEvent, *event.DeleteRobotEvent:
|
||||
addAuditLog = true
|
||||
case *event.PullArtifactEvent:
|
||||
addAuditLog = !config.PullAuditLogDisable(ctx)
|
||||
|
@ -65,6 +65,8 @@ func init() {
|
||||
_ = notifier.Subscribe(event.TopicDeleteRepository, &auditlog.Handler{})
|
||||
_ = notifier.Subscribe(event.TopicCreateTag, &auditlog.Handler{})
|
||||
_ = notifier.Subscribe(event.TopicDeleteTag, &auditlog.Handler{})
|
||||
_ = notifier.Subscribe(event.TopicCreateRobot, &auditlog.Handler{})
|
||||
_ = notifier.Subscribe(event.TopicDeleteRobot, &auditlog.Handler{})
|
||||
|
||||
// internal
|
||||
_ = notifier.Subscribe(event.TopicPullArtifact, &internal.ArtifactEventHandler{})
|
||||
|
73
src/controller/event/metadata/robot.go
Normal file
73
src/controller/event/metadata/robot.go
Normal file
@ -0,0 +1,73 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metadata
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/goharbor/harbor/src/common/security"
|
||||
event2 "github.com/goharbor/harbor/src/controller/event"
|
||||
"github.com/goharbor/harbor/src/lib/config"
|
||||
"github.com/goharbor/harbor/src/pkg/notifier/event"
|
||||
"github.com/goharbor/harbor/src/pkg/robot/model"
|
||||
)
|
||||
|
||||
// CreateRobotEventMetadata is the metadata from which the create robot event can be resolved
|
||||
type CreateRobotEventMetadata struct {
|
||||
Ctx context.Context
|
||||
Robot *model.Robot
|
||||
}
|
||||
|
||||
// Resolve to the event from the metadata
|
||||
func (c *CreateRobotEventMetadata) Resolve(event *event.Event) error {
|
||||
data := &event2.CreateRobotEvent{
|
||||
EventType: event2.TopicCreateRobot,
|
||||
Robot: c.Robot,
|
||||
OccurAt: time.Now(),
|
||||
}
|
||||
cx, exist := security.FromContext(c.Ctx)
|
||||
if exist {
|
||||
data.Operator = cx.GetUsername()
|
||||
}
|
||||
data.Robot.Name = fmt.Sprintf("%s%s", config.RobotPrefix(c.Ctx), data.Robot.Name)
|
||||
event.Topic = event2.TopicCreateRobot
|
||||
event.Data = data
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteRobotEventMetadata is the metadata from which the delete robot event can be resolved
|
||||
type DeleteRobotEventMetadata struct {
|
||||
Ctx context.Context
|
||||
Robot *model.Robot
|
||||
}
|
||||
|
||||
// Resolve to the event from the metadata
|
||||
func (d *DeleteRobotEventMetadata) Resolve(event *event.Event) error {
|
||||
data := &event2.DeleteRobotEvent{
|
||||
EventType: event2.TopicDeleteRobot,
|
||||
Robot: d.Robot,
|
||||
OccurAt: time.Now(),
|
||||
}
|
||||
cx, exist := security.FromContext(d.Ctx)
|
||||
if exist {
|
||||
data.Operator = cx.GetUsername()
|
||||
}
|
||||
data.Robot.Name = fmt.Sprintf("%s%s", config.RobotPrefix(d.Ctx), data.Robot.Name)
|
||||
event.Topic = event2.TopicDeleteRobot
|
||||
event.Data = data
|
||||
return nil
|
||||
}
|
83
src/controller/event/metadata/robot_test.go
Normal file
83
src/controller/event/metadata/robot_test.go
Normal file
@ -0,0 +1,83 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metadata
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/goharbor/harbor/src/common"
|
||||
event2 "github.com/goharbor/harbor/src/controller/event"
|
||||
"github.com/goharbor/harbor/src/lib/config"
|
||||
_ "github.com/goharbor/harbor/src/pkg/config/inmemory"
|
||||
"github.com/goharbor/harbor/src/pkg/notifier/event"
|
||||
"github.com/goharbor/harbor/src/pkg/robot/model"
|
||||
)
|
||||
|
||||
type robotEventTestSuite struct {
|
||||
suite.Suite
|
||||
}
|
||||
|
||||
func (t *tagEventTestSuite) TestResolveOfCreateRobotEventMetadata() {
|
||||
cfg := map[string]interface{}{
|
||||
common.RobotPrefix: "robot$",
|
||||
}
|
||||
config.InitWithSettings(cfg)
|
||||
|
||||
e := &event.Event{}
|
||||
metadata := &CreateRobotEventMetadata{
|
||||
Ctx: context.Background(),
|
||||
Robot: &model.Robot{
|
||||
ID: 1,
|
||||
Name: "test",
|
||||
},
|
||||
}
|
||||
err := metadata.Resolve(e)
|
||||
t.Require().Nil(err)
|
||||
t.Equal(event2.TopicCreateRobot, e.Topic)
|
||||
t.Require().NotNil(e.Data)
|
||||
data, ok := e.Data.(*event2.CreateRobotEvent)
|
||||
t.Require().True(ok)
|
||||
t.Equal(int64(1), data.Robot.ID)
|
||||
t.Equal("robot$test", data.Robot.Name)
|
||||
}
|
||||
|
||||
func (t *tagEventTestSuite) TestResolveOfDeleteRobotEventMetadata() {
|
||||
cfg := map[string]interface{}{
|
||||
common.RobotPrefix: "robot$",
|
||||
}
|
||||
config.InitWithSettings(cfg)
|
||||
|
||||
e := &event.Event{}
|
||||
metadata := &DeleteRobotEventMetadata{
|
||||
Ctx: context.Background(),
|
||||
Robot: &model.Robot{
|
||||
ID: 1,
|
||||
},
|
||||
}
|
||||
err := metadata.Resolve(e)
|
||||
t.Require().Nil(err)
|
||||
t.Equal(event2.TopicDeleteRobot, e.Topic)
|
||||
t.Require().NotNil(e.Data)
|
||||
data, ok := e.Data.(*event2.DeleteRobotEvent)
|
||||
t.Require().True(ok)
|
||||
t.Equal(int64(1), data.Robot.ID)
|
||||
}
|
||||
|
||||
func TestRobotEventTestSuite(t *testing.T) {
|
||||
suite.Run(t, &robotEventTestSuite{})
|
||||
}
|
@ -23,6 +23,7 @@ import (
|
||||
"github.com/goharbor/harbor/src/pkg/artifact"
|
||||
"github.com/goharbor/harbor/src/pkg/audit/model"
|
||||
proModels "github.com/goharbor/harbor/src/pkg/project/models"
|
||||
robotModel "github.com/goharbor/harbor/src/pkg/robot/model"
|
||||
v1 "github.com/goharbor/harbor/src/pkg/scan/rest/v1"
|
||||
)
|
||||
|
||||
@ -47,6 +48,8 @@ const (
|
||||
TopicReplication = "REPLICATION"
|
||||
TopicArtifactLabeled = "ARTIFACT_LABELED"
|
||||
TopicTagRetention = "TAG_RETENTION"
|
||||
TopicCreateRobot = "CREATE_ROBOT"
|
||||
TopicDeleteRobot = "DELETE_ROBOT"
|
||||
)
|
||||
|
||||
// CreateProjectEvent is the creating project event
|
||||
@ -369,3 +372,53 @@ func (r *RetentionEvent) String() string {
|
||||
return fmt.Sprintf("TaskID-%d Status-%s Deleted-%s OccurAt-%s",
|
||||
r.TaskID, r.Status, candidates, r.OccurAt.Format("2006-01-02 15:04:05"))
|
||||
}
|
||||
|
||||
// CreateRobotEvent is the creating robot event
|
||||
type CreateRobotEvent struct {
|
||||
EventType string
|
||||
Robot *robotModel.Robot
|
||||
Operator string
|
||||
OccurAt time.Time
|
||||
}
|
||||
|
||||
// ResolveToAuditLog ...
|
||||
func (c *CreateRobotEvent) ResolveToAuditLog() (*model.AuditLog, error) {
|
||||
auditLog := &model.AuditLog{
|
||||
ProjectID: c.Robot.ProjectID,
|
||||
OpTime: c.OccurAt,
|
||||
Operation: rbac.ActionCreate.String(),
|
||||
Username: c.Operator,
|
||||
ResourceType: "robot",
|
||||
Resource: c.Robot.Name}
|
||||
return auditLog, nil
|
||||
}
|
||||
|
||||
func (c *CreateRobotEvent) String() string {
|
||||
return fmt.Sprintf("Name-%s Operator-%s OccurAt-%s",
|
||||
c.Robot.Name, c.Operator, c.OccurAt.Format("2006-01-02 15:04:05"))
|
||||
}
|
||||
|
||||
// DeleteRobotEvent is the deleting robot event
|
||||
type DeleteRobotEvent struct {
|
||||
EventType string
|
||||
Robot *robotModel.Robot
|
||||
Operator string
|
||||
OccurAt time.Time
|
||||
}
|
||||
|
||||
// ResolveToAuditLog ...
|
||||
func (c *DeleteRobotEvent) ResolveToAuditLog() (*model.AuditLog, error) {
|
||||
auditLog := &model.AuditLog{
|
||||
ProjectID: c.Robot.ProjectID,
|
||||
OpTime: c.OccurAt,
|
||||
Operation: rbac.ActionDelete.String(),
|
||||
Username: c.Operator,
|
||||
ResourceType: "robot",
|
||||
Resource: c.Robot.Name}
|
||||
return auditLog, nil
|
||||
}
|
||||
|
||||
func (c *DeleteRobotEvent) String() string {
|
||||
return fmt.Sprintf("Name-%s Operator-%s OccurAt-%s",
|
||||
c.Robot.Name, c.Operator, c.OccurAt.Format("2006-01-02 15:04:05"))
|
||||
}
|
||||
|
@ -139,7 +139,7 @@ func (c *controller) GetExecution(ctx context.Context, id int64) (*Execution, er
|
||||
}
|
||||
if len(execs) == 0 {
|
||||
return nil, errors.New(nil).WithCode(errors.NotFoundCode).
|
||||
WithMessage("garbage collection execution %d not found", id)
|
||||
WithMessagef("garbage collection execution %d not found", id)
|
||||
}
|
||||
return convertExecution(execs[0]), nil
|
||||
}
|
||||
@ -157,7 +157,7 @@ func (c *controller) GetTask(ctx context.Context, id int64) (*Task, error) {
|
||||
}
|
||||
if len(tasks) == 0 {
|
||||
return nil, errors.New(nil).WithCode(errors.NotFoundCode).
|
||||
WithMessage("garbage collection task %d not found", id)
|
||||
WithMessagef("garbage collection task %d not found", id)
|
||||
}
|
||||
return convertTask(tasks[0]), nil
|
||||
}
|
||||
|
@ -143,7 +143,7 @@ func (c *controller) Get(ctx context.Context, digest string) (*Icon, error) {
|
||||
}
|
||||
if len(artifacts) == 0 {
|
||||
return nil, errors.New(nil).WithCode(errors.NotFoundCode).
|
||||
WithMessage("the icon %s not found", digest)
|
||||
WithMessagef("the icon %s not found", digest)
|
||||
}
|
||||
_, iconFile, err = c.regCli.PullBlob(artifacts[0].RepositoryName, digest)
|
||||
if err != nil {
|
||||
|
@ -135,20 +135,20 @@ func (c *controller) Create(ctx context.Context, projectNameOrID interface{}, re
|
||||
if req.MemberUser.UserID > 0 {
|
||||
user, err := c.userManager.Get(ctx, req.MemberUser.UserID)
|
||||
if err != nil {
|
||||
return 0, errors.BadRequestError(nil).WithMessage("Failed to get user %d: %v", req.MemberUser.UserID, err)
|
||||
return 0, errors.BadRequestError(nil).WithMessagef("Failed to get user %d: %v", req.MemberUser.UserID, err)
|
||||
}
|
||||
if user == nil {
|
||||
return 0, errors.BadRequestError(nil).WithMessage("User %d not found", req.MemberUser.UserID)
|
||||
return 0, errors.BadRequestError(nil).WithMessagef("User %d not found", req.MemberUser.UserID)
|
||||
}
|
||||
member.EntityID = req.MemberUser.UserID
|
||||
member.EntityType = common.UserMember
|
||||
} else if req.MemberGroup.ID > 0 {
|
||||
g, err := c.groupManager.Get(ctx, req.MemberGroup.ID)
|
||||
if err != nil {
|
||||
return 0, errors.BadRequestError(nil).WithMessage("Failed to get group %d: %v", req.MemberGroup.ID, err)
|
||||
return 0, errors.BadRequestError(nil).WithMessagef("Failed to get group %d: %v", req.MemberGroup.ID, err)
|
||||
}
|
||||
if g == nil {
|
||||
return 0, errors.BadRequestError(nil).WithMessage("Group %d not found", req.MemberGroup.ID)
|
||||
return 0, errors.BadRequestError(nil).WithMessagef("Group %d not found", req.MemberGroup.ID)
|
||||
}
|
||||
member.EntityID = req.MemberGroup.ID
|
||||
} else if len(req.MemberUser.Username) > 0 {
|
||||
|
@ -219,7 +219,7 @@ func (c *controller) DeleteInstance(ctx context.Context, id int64) error {
|
||||
if len(policies) > 0 {
|
||||
return errors.New(nil).
|
||||
WithCode(errors.PreconditionCode).
|
||||
WithMessage("Provider [%s] cannot be deleted as some preheat policies are using it", ins.Name)
|
||||
WithMessagef("Provider [%s] cannot be deleted as some preheat policies are using it", ins.Name)
|
||||
}
|
||||
|
||||
return c.iManager.Delete(ctx, id)
|
||||
@ -246,7 +246,7 @@ func (c *controller) UpdateInstance(ctx context.Context, instance *providerModel
|
||||
if len(policies) > 0 {
|
||||
return errors.New(nil).
|
||||
WithCode(errors.PreconditionCode).
|
||||
WithMessage("Provider [%s] cannot be disabled as some preheat policies are using it", oldIns.Name)
|
||||
WithMessagef("Provider [%s] cannot be disabled as some preheat policies are using it", oldIns.Name)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -31,8 +31,8 @@ type preheatSuite struct {
|
||||
suite.Suite
|
||||
ctx context.Context
|
||||
controller Controller
|
||||
fakeInstanceMgr *instance.FakeManager
|
||||
fakePolicyMgr *pmocks.FakeManager
|
||||
fakeInstanceMgr *instance.Manager
|
||||
fakePolicyMgr *pmocks.Manager
|
||||
fakeScheduler *smocks.Scheduler
|
||||
mockInstanceServer *httptest.Server
|
||||
fakeExecutionMgr *tmocks.ExecutionManager
|
||||
@ -40,8 +40,8 @@ type preheatSuite struct {
|
||||
|
||||
func TestPreheatSuite(t *testing.T) {
|
||||
t.Log("Start TestPreheatSuite")
|
||||
fakeInstanceMgr := &instance.FakeManager{}
|
||||
fakePolicyMgr := &pmocks.FakeManager{}
|
||||
fakeInstanceMgr := &instance.Manager{}
|
||||
fakePolicyMgr := &pmocks.Manager{}
|
||||
fakeScheduler := &smocks.Scheduler{}
|
||||
fakeExecutionMgr := &tmocks.ExecutionManager{}
|
||||
|
||||
|
@ -402,7 +402,7 @@ func (de *defaultEnforcer) launchExecutions(ctx context.Context, candidates []*s
|
||||
// Start tasks
|
||||
count := 0
|
||||
for _, c := range candidates {
|
||||
if _, err = de.startTask(ctx, eid, c, insData); err != nil {
|
||||
if _, err = de.startTask(ctx, eid, c, insData, pl.Scope); err != nil {
|
||||
// Just log the error and skip
|
||||
log.Errorf("start task error for preheating image: %s/%s:%s@%s", c.Namespace, c.Repository, c.Tags[0], c.Digest)
|
||||
continue
|
||||
@ -421,7 +421,7 @@ func (de *defaultEnforcer) launchExecutions(ctx context.Context, candidates []*s
|
||||
}
|
||||
|
||||
// startTask starts the preheat task(job) for the given candidate
|
||||
func (de *defaultEnforcer) startTask(ctx context.Context, executionID int64, candidate *selector.Candidate, instance string) (int64, error) {
|
||||
func (de *defaultEnforcer) startTask(ctx context.Context, executionID int64, candidate *selector.Candidate, instance, scope string) (int64, error) {
|
||||
u, err := de.fullURLGetter(candidate)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
@ -441,6 +441,7 @@ func (de *defaultEnforcer) startTask(ctx context.Context, executionID int64, can
|
||||
ImageName: fmt.Sprintf("%s/%s", candidate.Namespace, candidate.Repository),
|
||||
Tag: candidate.Tags[0],
|
||||
Digest: candidate.Digest,
|
||||
Scope: scope,
|
||||
}
|
||||
|
||||
piData, err := pi.ToJSON()
|
||||
|
@ -70,7 +70,7 @@ func (suite *EnforcerTestSuite) SetupSuite() {
|
||||
suite.server.StartTLS()
|
||||
|
||||
fakePolicies := mockPolicies()
|
||||
fakePolicyManager := &policy.FakeManager{}
|
||||
fakePolicyManager := &policy.Manager{}
|
||||
fakePolicyManager.On("Get",
|
||||
context.TODO(),
|
||||
mock.AnythingOfType("int64")).
|
||||
@ -130,7 +130,7 @@ func (suite *EnforcerTestSuite) SetupSuite() {
|
||||
},
|
||||
}, nil)
|
||||
|
||||
fakeInstanceMgr := &instance.FakeManager{}
|
||||
fakeInstanceMgr := &instance.Manager{}
|
||||
fakeInstanceMgr.On("Get",
|
||||
context.TODO(),
|
||||
mock.AnythingOfType("int64"),
|
||||
@ -210,6 +210,7 @@ func mockPolicies() []*po.Schema {
|
||||
Type: po.TriggerTypeManual,
|
||||
},
|
||||
Enabled: true,
|
||||
Scope: "single_peer",
|
||||
CreatedAt: time.Now().UTC(),
|
||||
UpdatedTime: time.Now().UTC(),
|
||||
}, {
|
||||
@ -235,6 +236,7 @@ func mockPolicies() []*po.Schema {
|
||||
Trigger: &po.Trigger{
|
||||
Type: po.TriggerTypeEventBased,
|
||||
},
|
||||
Scope: "all_peers",
|
||||
Enabled: true,
|
||||
CreatedAt: time.Now().UTC(),
|
||||
UpdatedTime: time.Now().UTC(),
|
||||
|
@ -264,7 +264,7 @@ func (c *controller) HeadManifest(_ context.Context, art lib.ArtifactInfo, remot
|
||||
func (c *controller) ProxyBlob(ctx context.Context, p *proModels.Project, art lib.ArtifactInfo) (int64, io.ReadCloser, error) {
|
||||
remoteRepo := getRemoteRepo(art)
|
||||
log.Debugf("The blob doesn't exist, proxy the request to the target server, url:%v", remoteRepo)
|
||||
rHelper, err := NewRemoteHelper(ctx, p.RegistryID)
|
||||
rHelper, err := NewRemoteHelper(ctx, p.RegistryID, WithSpeed(p.ProxyCacheSpeed()))
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
|
@ -12,6 +12,26 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package replication
|
||||
package proxy
|
||||
|
||||
//go:generate mockery --dir ./flow --name Controller --output . --outpkg replication --filename mock_flow_controller_test.go --structname flowController
|
||||
type Option func(*Options)
|
||||
|
||||
type Options struct {
|
||||
// Speed is the data transfer speed for proxy cache from Harbor to upstream registry, no limit by default.
|
||||
Speed int32
|
||||
}
|
||||
|
||||
func NewOptions(opts ...Option) *Options {
|
||||
o := &Options{}
|
||||
for _, opt := range opts {
|
||||
opt(o)
|
||||
}
|
||||
|
||||
return o
|
||||
}
|
||||
|
||||
func WithSpeed(speed int32) Option {
|
||||
return func(o *Options) {
|
||||
o.Speed = speed
|
||||
}
|
||||
}
|
@ -12,6 +12,22 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package scheduler
|
||||
package proxy
|
||||
|
||||
//go:generate mockery --name DAO --output . --outpkg scheduler --filename mock_dao_test.go --structname mockDAO --inpackage
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNewOptions(t *testing.T) {
|
||||
// test default options
|
||||
o := NewOptions()
|
||||
assert.Equal(t, int32(0), o.Speed)
|
||||
|
||||
// test with options
|
||||
// with speed
|
||||
withSpeed := WithSpeed(1024)
|
||||
o = NewOptions(withSpeed)
|
||||
assert.Equal(t, int32(1024), o.Speed)
|
||||
}
|
@ -21,6 +21,7 @@ import (
|
||||
|
||||
"github.com/docker/distribution"
|
||||
|
||||
"github.com/goharbor/harbor/src/lib"
|
||||
"github.com/goharbor/harbor/src/pkg/reg"
|
||||
"github.com/goharbor/harbor/src/pkg/reg/adapter"
|
||||
"github.com/goharbor/harbor/src/pkg/reg/model"
|
||||
@ -43,13 +44,16 @@ type remoteHelper struct {
|
||||
regID int64
|
||||
registry adapter.ArtifactRegistry
|
||||
registryMgr reg.Manager
|
||||
opts *Options
|
||||
}
|
||||
|
||||
// NewRemoteHelper create a remote interface
|
||||
func NewRemoteHelper(ctx context.Context, regID int64) (RemoteInterface, error) {
|
||||
func NewRemoteHelper(ctx context.Context, regID int64, opts ...Option) (RemoteInterface, error) {
|
||||
r := &remoteHelper{
|
||||
regID: regID,
|
||||
registryMgr: reg.Mgr}
|
||||
registryMgr: reg.Mgr,
|
||||
opts: NewOptions(opts...),
|
||||
}
|
||||
if err := r.init(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -83,7 +87,14 @@ func (r *remoteHelper) init(ctx context.Context) error {
|
||||
}
|
||||
|
||||
func (r *remoteHelper) BlobReader(repo, dig string) (int64, io.ReadCloser, error) {
|
||||
return r.registry.PullBlob(repo, dig)
|
||||
sz, bReader, err := r.registry.PullBlob(repo, dig)
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
if r.opts != nil && r.opts.Speed > 0 {
|
||||
bReader = lib.NewReader(bReader, r.opts.Speed)
|
||||
}
|
||||
return sz, bReader, err
|
||||
}
|
||||
|
||||
func (r *remoteHelper) Manifest(repo string, ref string) (distribution.Manifest, string, error) {
|
||||
|
@ -123,6 +123,7 @@ func flushQuota(ctx context.Context) {
|
||||
iter, err := cache.Default().Scan(ctx, "quota:*")
|
||||
if err != nil {
|
||||
log.Errorf("failed to scan out the quota records from redis")
|
||||
return
|
||||
}
|
||||
|
||||
for iter.Next(ctx) {
|
||||
@ -349,7 +350,7 @@ func (c *controller) updateUsageWithRetry(ctx context.Context, reference, refere
|
||||
options := []retry.Option{
|
||||
retry.Timeout(defaultRetryTimeout),
|
||||
retry.Backoff(false),
|
||||
retry.Callback(func(err error, sleep time.Duration) {
|
||||
retry.Callback(func(err error, _ time.Duration) {
|
||||
log.G(ctx).Debugf("failed to update the quota usage for %s %s, error: %v", reference, referenceID, err)
|
||||
}),
|
||||
}
|
||||
@ -488,7 +489,7 @@ func reserveResources(resources types.ResourceList) func(hardLimits, used types.
|
||||
newUsed := types.Add(used, resources)
|
||||
|
||||
if err := quota.IsSafe(hardLimits, used, newUsed, false); err != nil {
|
||||
return nil, errors.DeniedError(err).WithMessage("Quota exceeded when processing the request of %v", err)
|
||||
return nil, errors.DeniedError(err).WithMessagef("Quota exceeded when processing the request of %v", err)
|
||||
}
|
||||
|
||||
return newUsed, nil
|
||||
@ -496,7 +497,7 @@ func reserveResources(resources types.ResourceList) func(hardLimits, used types.
|
||||
}
|
||||
|
||||
func rollbackResources(resources types.ResourceList) func(hardLimits, used types.ResourceList) (types.ResourceList, error) {
|
||||
return func(hardLimits, used types.ResourceList) (types.ResourceList, error) {
|
||||
return func(_, used types.ResourceList) (types.ResourceList, error) {
|
||||
newUsed := types.Subtract(used, resources)
|
||||
// ensure that new used is never negative
|
||||
if negativeUsed := types.IsNegative(newUsed); len(negativeUsed) > 0 {
|
||||
|
@ -75,7 +75,7 @@ func getProjectsBatchFn(ctx context.Context, keys dataloader.Keys) []*dataloader
|
||||
for _, projectID := range projectIDs {
|
||||
project, ok := projectsMap[projectID]
|
||||
if !ok {
|
||||
err := errors.NotFoundError(nil).WithMessage("project %d not found", projectID)
|
||||
err := errors.NotFoundError(nil).WithMessagef("project %d not found", projectID)
|
||||
return handleError(err)
|
||||
}
|
||||
|
||||
|
@ -136,7 +136,7 @@ func (c *controller) Delete(ctx context.Context, id int64) error {
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
return errors.New(nil).WithCode(errors.PreconditionCode).WithMessage("the registry %d is referenced by replication policies, cannot delete it", id)
|
||||
return errors.New(nil).WithCode(errors.PreconditionCode).WithMessagef("the registry %d is referenced by replication policies, cannot delete it", id)
|
||||
}
|
||||
// referenced by replication policy as destination registry
|
||||
count, err = c.repMgr.Count(ctx, &q.Query{
|
||||
@ -148,7 +148,7 @@ func (c *controller) Delete(ctx context.Context, id int64) error {
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
return errors.New(nil).WithCode(errors.PreconditionCode).WithMessage("the registry %d is referenced by replication policies, cannot delete it", id)
|
||||
return errors.New(nil).WithCode(errors.PreconditionCode).WithMessagef("the registry %d is referenced by replication policies, cannot delete it", id)
|
||||
}
|
||||
// referenced by proxy cache project
|
||||
count, err = c.proMgr.Count(ctx, &q.Query{
|
||||
@ -160,7 +160,7 @@ func (c *controller) Delete(ctx context.Context, id int64) error {
|
||||
return err
|
||||
}
|
||||
if count > 0 {
|
||||
return errors.New(nil).WithCode(errors.PreconditionCode).WithMessage("the registry %d is referenced by proxy cache project, cannot delete it", id)
|
||||
return errors.New(nil).WithCode(errors.PreconditionCode).WithMessagef("the registry %d is referenced by proxy cache project, cannot delete it", id)
|
||||
}
|
||||
|
||||
return c.regMgr.Delete(ctx, id)
|
||||
|
@ -102,7 +102,7 @@ func (c *controller) Start(ctx context.Context, policy *replicationmodel.Policy,
|
||||
logger := log.GetLogger(ctx)
|
||||
if !policy.Enabled {
|
||||
return 0, errors.New(nil).WithCode(errors.PreconditionCode).
|
||||
WithMessage("the policy %d is disabled", policy.ID)
|
||||
WithMessagef("the policy %d is disabled", policy.ID)
|
||||
}
|
||||
// create an execution record
|
||||
extra := make(map[string]interface{})
|
||||
@ -213,7 +213,7 @@ func (c *controller) GetExecution(ctx context.Context, id int64) (*Execution, er
|
||||
}
|
||||
if len(execs) == 0 {
|
||||
return nil, errors.New(nil).WithCode(errors.NotFoundCode).
|
||||
WithMessage("replication execution %d not found", id)
|
||||
WithMessagef("replication execution %d not found", id)
|
||||
}
|
||||
return convertExecution(execs[0]), nil
|
||||
}
|
||||
@ -250,7 +250,7 @@ func (c *controller) GetTask(ctx context.Context, id int64) (*Task, error) {
|
||||
}
|
||||
if len(tasks) == 0 {
|
||||
return nil, errors.New(nil).WithCode(errors.NotFoundCode).
|
||||
WithMessage("replication task %d not found", id)
|
||||
WithMessagef("replication task %d not found", id)
|
||||
}
|
||||
return convertTask(tasks[0]), nil
|
||||
}
|
||||
|
@ -24,6 +24,3 @@ type registryAdapter interface {
|
||||
adapter.Adapter
|
||||
adapter.ArtifactRegistry
|
||||
}
|
||||
|
||||
//go:generate mockery --dir . --name registryAdapter --output . --outpkg flow --filename mock_adapter_test.go --structname mockAdapter
|
||||
//go:generate mockery --dir ../../../pkg/reg/adapter --name Factory --output . --outpkg flow --filename mock_adapter_factory_test.go --structname mockFactory
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Code generated by mockery v2.42.2. DO NOT EDIT.
|
||||
// Code generated by mockery v2.46.2. DO NOT EDIT.
|
||||
|
||||
package flow
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Code generated by mockery v2.42.2. DO NOT EDIT.
|
||||
// Code generated by mockery v2.46.2. DO NOT EDIT.
|
||||
|
||||
package flow
|
||||
|
||||
|
@ -203,7 +203,7 @@ func replaceNamespace(repository string, namespace string, replaceCount int8, ds
|
||||
dstRepoPrefix = namespace
|
||||
case int(replaceCount) > srcLength-1: // invalid replace count
|
||||
return "", errors.New(nil).WithCode(errors.BadRequestCode).
|
||||
WithMessage("the source repository %q contains only %d path components %v excepting the last one, but the destination namespace flattening level is %d",
|
||||
WithMessagef("the source repository %q contains only %d path components %v excepting the last one, but the destination namespace flattening level is %d",
|
||||
repository, srcLength-1, srcRepoPathComponents[:srcLength-1], replaceCount)
|
||||
default:
|
||||
dstRepoPrefix = namespace + "/" + strings.Join(srcRepoPathComponents[replaceCount:srcLength-1], "/")
|
||||
@ -216,12 +216,12 @@ func replaceNamespace(repository string, namespace string, replaceCount int8, ds
|
||||
switch dstRepoComponentPathType {
|
||||
case model.RepositoryPathComponentTypeOnlyTwo:
|
||||
if dstLength != 2 {
|
||||
return "", errors.New(nil).WithCode(errors.BadRequestCode).WithMessage("the destination repository %q contains %d path components %v, but the destination registry only supports 2",
|
||||
return "", errors.New(nil).WithCode(errors.BadRequestCode).WithMessagef("the destination repository %q contains %d path components %v, but the destination registry only supports 2",
|
||||
dstRepo, dstLength, dstRepoPathComponents)
|
||||
}
|
||||
case model.RepositoryPathComponentTypeAtLeastTwo:
|
||||
if dstLength < 2 {
|
||||
return "", errors.New(nil).WithCode(errors.BadRequestCode).WithMessage("the destination repository %q contains only %d path components %v, but the destination registry requires at least 2",
|
||||
return "", errors.New(nil).WithCode(errors.BadRequestCode).WithMessagef("the destination repository %q contains only %d path components %v, but the destination registry requires at least 2",
|
||||
dstRepo, dstLength, dstRepoPathComponents)
|
||||
}
|
||||
}
|
||||
|
@ -1,4 +1,4 @@
|
||||
// Code generated by mockery v2.42.2. DO NOT EDIT.
|
||||
// Code generated by mockery v2.46.2. DO NOT EDIT.
|
||||
|
||||
package replication
|
||||
|
||||
|
@ -91,7 +91,7 @@ func (p *Policy) Validate() error {
|
||||
if len(p.DestNamespace) > 0 {
|
||||
if !lib.RepositoryNameRe.MatchString(p.DestNamespace) {
|
||||
return errors.New(nil).WithCode(errors.BadRequestCode).
|
||||
WithMessage("invalid destination namespace: %s", p.DestNamespace)
|
||||
WithMessagef("invalid destination namespace: %s", p.DestNamespace)
|
||||
}
|
||||
}
|
||||
|
||||
@ -102,11 +102,11 @@ func (p *Policy) Validate() error {
|
||||
case model.TriggerTypeScheduled:
|
||||
if p.Trigger.Settings == nil || len(p.Trigger.Settings.Cron) == 0 {
|
||||
return errors.New(nil).WithCode(errors.BadRequestCode).
|
||||
WithMessage("the cron string cannot be empty when the trigger type is %s", model.TriggerTypeScheduled)
|
||||
WithMessagef("the cron string cannot be empty when the trigger type is %s", model.TriggerTypeScheduled)
|
||||
}
|
||||
if _, err := utils.CronParser().Parse(p.Trigger.Settings.Cron); err != nil {
|
||||
return errors.New(nil).WithCode(errors.BadRequestCode).
|
||||
WithMessage("invalid cron string for scheduled trigger: %s", p.Trigger.Settings.Cron)
|
||||
WithMessagef("invalid cron string for scheduled trigger: %s", p.Trigger.Settings.Cron)
|
||||
}
|
||||
cronParts := strings.Split(p.Trigger.Settings.Cron, " ")
|
||||
if cronParts[0] != "0" {
|
||||
|
@ -30,6 +30,7 @@ import (
|
||||
|
||||
common_http "github.com/goharbor/harbor/src/common/http"
|
||||
trans "github.com/goharbor/harbor/src/controller/replication/transfer"
|
||||
"github.com/goharbor/harbor/src/lib"
|
||||
"github.com/goharbor/harbor/src/lib/log"
|
||||
"github.com/goharbor/harbor/src/pkg/reg/adapter"
|
||||
"github.com/goharbor/harbor/src/pkg/reg/model"
|
||||
@ -380,7 +381,7 @@ func (t *transfer) copyBlobByMonolithic(srcRepo, dstRepo, digest string, sizeFro
|
||||
return err
|
||||
}
|
||||
if speed > 0 {
|
||||
data = trans.NewReader(data, speed)
|
||||
data = lib.NewReader(data, speed)
|
||||
}
|
||||
defer data.Close()
|
||||
// get size 0 from PullBlob, use size from distribution.Descriptor instead.
|
||||
@ -435,7 +436,7 @@ func (t *transfer) copyBlobByChunk(srcRepo, dstRepo, digest string, sizeFromDesc
|
||||
}
|
||||
|
||||
if speed > 0 {
|
||||
data = trans.NewReader(data, speed)
|
||||
data = lib.NewReader(data, speed)
|
||||
}
|
||||
// failureEnd will only be used for adjusting content range when issue happened during push the chunk.
|
||||
var failureEnd int64
|
||||
|
@ -36,8 +36,6 @@ import (
|
||||
"github.com/goharbor/harbor/src/pkg/task"
|
||||
)
|
||||
|
||||
// go:generate mockery -name Controller -case snake
|
||||
|
||||
// Controller to handle the requests related with retention
|
||||
type Controller interface {
|
||||
GetRetention(ctx context.Context, id int64) (*policy.Metadata, error)
|
||||
|
@ -23,12 +23,14 @@ import (
|
||||
|
||||
rbac_project "github.com/goharbor/harbor/src/common/rbac/project"
|
||||
"github.com/goharbor/harbor/src/common/utils"
|
||||
"github.com/goharbor/harbor/src/controller/event/metadata"
|
||||
"github.com/goharbor/harbor/src/lib/config"
|
||||
"github.com/goharbor/harbor/src/lib/errors"
|
||||
"github.com/goharbor/harbor/src/lib/log"
|
||||
"github.com/goharbor/harbor/src/lib/q"
|
||||
"github.com/goharbor/harbor/src/lib/retry"
|
||||
"github.com/goharbor/harbor/src/pkg"
|
||||
"github.com/goharbor/harbor/src/pkg/notification"
|
||||
"github.com/goharbor/harbor/src/pkg/permission/types"
|
||||
"github.com/goharbor/harbor/src/pkg/project"
|
||||
"github.com/goharbor/harbor/src/pkg/rbac"
|
||||
@ -95,10 +97,6 @@ func (d *controller) Count(ctx context.Context, query *q.Query) (int64, error) {
|
||||
|
||||
// Create ...
|
||||
func (d *controller) Create(ctx context.Context, r *Robot) (int64, string, error) {
|
||||
if err := d.setProject(ctx, r); err != nil {
|
||||
return 0, "", err
|
||||
}
|
||||
|
||||
var expiresAt int64
|
||||
if r.Duration == -1 {
|
||||
expiresAt = -1
|
||||
@ -121,7 +119,8 @@ func (d *controller) Create(ctx context.Context, r *Robot) (int64, string, error
|
||||
if r.Level == LEVELPROJECT {
|
||||
name = fmt.Sprintf("%s+%s", r.ProjectName, r.Name)
|
||||
}
|
||||
robotID, err := d.robotMgr.Create(ctx, &model.Robot{
|
||||
|
||||
rCreate := &model.Robot{
|
||||
Name: name,
|
||||
Description: r.Description,
|
||||
ProjectID: r.ProjectID,
|
||||
@ -130,7 +129,10 @@ func (d *controller) Create(ctx context.Context, r *Robot) (int64, string, error
|
||||
Duration: r.Duration,
|
||||
Salt: salt,
|
||||
Visible: r.Visible,
|
||||
})
|
||||
CreatorRef: r.CreatorRef,
|
||||
CreatorType: r.CreatorType,
|
||||
}
|
||||
robotID, err := d.robotMgr.Create(ctx, rCreate)
|
||||
if err != nil {
|
||||
return 0, "", err
|
||||
}
|
||||
@ -138,17 +140,31 @@ func (d *controller) Create(ctx context.Context, r *Robot) (int64, string, error
|
||||
if err := d.createPermission(ctx, r); err != nil {
|
||||
return 0, "", err
|
||||
}
|
||||
// fire event
|
||||
notification.AddEvent(ctx, &metadata.CreateRobotEventMetadata{
|
||||
Ctx: ctx,
|
||||
Robot: rCreate,
|
||||
})
|
||||
return robotID, pwd, nil
|
||||
}
|
||||
|
||||
// Delete ...
|
||||
func (d *controller) Delete(ctx context.Context, id int64) error {
|
||||
rDelete, err := d.robotMgr.Get(ctx, id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := d.robotMgr.Delete(ctx, id); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := d.rbacMgr.DeletePermissionsByRole(ctx, ROBOTTYPE, id); err != nil {
|
||||
return err
|
||||
}
|
||||
// fire event
|
||||
notification.AddEvent(ctx, &metadata.DeleteRobotEventMetadata{
|
||||
Ctx: ctx,
|
||||
Robot: rDelete,
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -307,22 +323,6 @@ func (d *controller) populatePermissions(ctx context.Context, r *Robot) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// set the project info if it's a project level robot
|
||||
func (d *controller) setProject(ctx context.Context, r *Robot) error {
|
||||
if r == nil {
|
||||
return nil
|
||||
}
|
||||
if r.Level == LEVELPROJECT {
|
||||
pro, err := d.proMgr.Get(ctx, r.Permissions[0].Namespace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.ProjectName = pro.Name
|
||||
r.ProjectID = pro.ProjectID
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// convertScope converts the db scope into robot model
|
||||
// /system => Kind: system Namespace: /
|
||||
// /project/* => Kind: project Namespace: *
|
||||
@ -374,6 +374,22 @@ func (d *controller) toScope(ctx context.Context, p *Permission) (string, error)
|
||||
return "", errors.New(nil).WithMessage("unknown robot kind").WithCode(errors.BadRequestCode)
|
||||
}
|
||||
|
||||
// set the project info if it's a project level robot
|
||||
func SetProject(ctx context.Context, r *Robot) error {
|
||||
if r == nil {
|
||||
return nil
|
||||
}
|
||||
if r.Level == LEVELPROJECT {
|
||||
pro, err := project.New().Get(ctx, r.Permissions[0].Namespace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.ProjectName = pro.Name
|
||||
r.ProjectID = pro.ProjectID
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func CreateSec(salt ...string) (string, string, string, error) {
|
||||
var secret, pwd string
|
||||
options := []retry.Option{
|
||||
|
@ -9,6 +9,7 @@ import (
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/goharbor/harbor/src/common"
|
||||
"github.com/goharbor/harbor/src/common/security"
|
||||
"github.com/goharbor/harbor/src/common/utils/test"
|
||||
"github.com/goharbor/harbor/src/lib/config"
|
||||
"github.com/goharbor/harbor/src/lib/q"
|
||||
@ -18,6 +19,7 @@ import (
|
||||
rbac_model "github.com/goharbor/harbor/src/pkg/rbac/model"
|
||||
"github.com/goharbor/harbor/src/pkg/robot/model"
|
||||
htesting "github.com/goharbor/harbor/src/testing"
|
||||
testsec "github.com/goharbor/harbor/src/testing/common/security"
|
||||
"github.com/goharbor/harbor/src/testing/mock"
|
||||
"github.com/goharbor/harbor/src/testing/pkg/project"
|
||||
"github.com/goharbor/harbor/src/testing/pkg/rbac"
|
||||
@ -102,7 +104,9 @@ func (suite *ControllerTestSuite) TestCreate() {
|
||||
robotMgr := &robot.Manager{}
|
||||
|
||||
c := controller{robotMgr: robotMgr, rbacMgr: rbacMgr, proMgr: projectMgr}
|
||||
ctx := context.TODO()
|
||||
secCtx := &testsec.Context{}
|
||||
secCtx.On("GetUsername").Return("security-context-user")
|
||||
ctx := security.NewContext(context.Background(), secCtx)
|
||||
projectMgr.On("Get", mock.Anything, mock.Anything).Return(&proModels.Project{ProjectID: 1, Name: "library"}, nil)
|
||||
robotMgr.On("Create", mock.Anything, mock.Anything).Return(int64(1), nil)
|
||||
rbacMgr.On("CreateRbacPolicy", mock.Anything, mock.Anything, mock.Anything).Return(int64(1), nil)
|
||||
@ -145,6 +149,12 @@ func (suite *ControllerTestSuite) TestDelete() {
|
||||
c := controller{robotMgr: robotMgr, rbacMgr: rbacMgr, proMgr: projectMgr}
|
||||
ctx := context.TODO()
|
||||
|
||||
robotMgr.On("Get", mock.Anything, mock.Anything).Return(&model.Robot{
|
||||
Name: "library+test",
|
||||
Description: "test get method",
|
||||
ProjectID: 1,
|
||||
Secret: utils.RandStringBytes(10),
|
||||
}, nil)
|
||||
robotMgr.On("Delete", mock.Anything, mock.Anything).Return(nil)
|
||||
rbacMgr.On("DeletePermissionsByRole", mock.Anything, mock.Anything, mock.Anything).Return(nil)
|
||||
|
||||
|
@ -39,10 +39,11 @@ const (
|
||||
// Robot ...
|
||||
type Robot struct {
|
||||
model.Robot
|
||||
ProjectName string
|
||||
Level string
|
||||
Editable bool `json:"editable"`
|
||||
Permissions []*Permission `json:"permissions"`
|
||||
ProjectName string
|
||||
ProjectNameOrID interface{}
|
||||
Level string
|
||||
Editable bool `json:"editable"`
|
||||
Permissions []*Permission `json:"permissions"`
|
||||
}
|
||||
|
||||
// IsSysLevel, true is a system level robot, others are project level.
|
||||
|
@ -243,12 +243,12 @@ func (bc *basicController) Scan(ctx context.Context, artifact *ar.Artifact, opti
|
||||
|
||||
// In case it does not exist
|
||||
if r == nil {
|
||||
return errors.PreconditionFailedError(nil).WithMessage("no available scanner for project: %d", artifact.ProjectID)
|
||||
return errors.PreconditionFailedError(nil).WithMessagef("no available scanner for project: %d", artifact.ProjectID)
|
||||
}
|
||||
|
||||
// Check if it is disabled
|
||||
if r.Disabled {
|
||||
return errors.PreconditionFailedError(nil).WithMessage("scanner %s is deactivated", r.Name)
|
||||
return errors.PreconditionFailedError(nil).WithMessagef("scanner %s is deactivated", r.Name)
|
||||
}
|
||||
|
||||
artifacts, scannable, err := bc.collectScanningArtifacts(ctx, r, artifact)
|
||||
@ -266,7 +266,7 @@ func (bc *basicController) Scan(ctx context.Context, artifact *ar.Artifact, opti
|
||||
// skip to return err for event related scan
|
||||
return nil
|
||||
}
|
||||
return errors.BadRequestError(nil).WithMessage("the configured scanner %s does not support scanning artifact with mime type %s", r.Name, artifact.ManifestMediaType)
|
||||
return errors.BadRequestError(nil).WithMessagef("the configured scanner %s does not support scanning artifact with mime type %s", r.Name, artifact.ManifestMediaType)
|
||||
}
|
||||
|
||||
var (
|
||||
@ -376,8 +376,7 @@ func (bc *basicController) Stop(ctx context.Context, artifact *ar.Artifact, capT
|
||||
}
|
||||
|
||||
if len(executions) == 0 {
|
||||
message := fmt.Sprintf("no scan job for artifact digest=%v", artifact.Digest)
|
||||
return errors.BadRequestError(nil).WithMessage(message)
|
||||
return errors.BadRequestError(nil).WithMessagef("no scan job for artifact digest=%v", artifact.Digest)
|
||||
}
|
||||
execution := executions[0]
|
||||
return bc.execMgr.Stop(ctx, execution.ID)
|
||||
@ -590,7 +589,7 @@ func (bc *basicController) GetReport(ctx context.Context, artifact *ar.Artifact,
|
||||
}
|
||||
|
||||
if r == nil {
|
||||
return nil, errors.NotFoundError(nil).WithMessage("no scanner registration configured for project: %d", artifact.ProjectID)
|
||||
return nil, errors.NotFoundError(nil).WithMessagef("no scanner registration configured for project: %d", artifact.ProjectID)
|
||||
}
|
||||
|
||||
artifacts, scannable, err := bc.collectScanningArtifacts(ctx, r, artifact)
|
||||
@ -599,7 +598,7 @@ func (bc *basicController) GetReport(ctx context.Context, artifact *ar.Artifact,
|
||||
}
|
||||
|
||||
if !scannable {
|
||||
return nil, errors.NotFoundError(nil).WithMessage("report not found for %s@%s", artifact.RepositoryName, artifact.Digest)
|
||||
return nil, errors.NotFoundError(nil).WithMessagef("report not found for %s@%s", artifact.RepositoryName, artifact.Digest)
|
||||
}
|
||||
|
||||
groupReports := make([][]*scan.Report, len(artifacts))
|
||||
@ -681,7 +680,7 @@ func (bc *basicController) GetScanLog(ctx context.Context, artifact *ar.Artifact
|
||||
reportUUIDToTasks := map[string]*task.Task{}
|
||||
for _, t := range tasks {
|
||||
if !scanTaskForArtifacts(t, artifactMap) {
|
||||
return nil, errors.NotFoundError(nil).WithMessage("scan log with uuid: %s not found", uuid)
|
||||
return nil, errors.NotFoundError(nil).WithMessagef("scan log with uuid: %s not found", uuid)
|
||||
}
|
||||
for _, reportUUID := range GetReportUUIDs(t.ExtraAttrs) {
|
||||
reportUUIDToTasks[reportUUID] = t
|
||||
@ -864,8 +863,11 @@ func (bc *basicController) makeRobotAccount(ctx context.Context, projectID int64
|
||||
Description: "for scan",
|
||||
ProjectID: projectID,
|
||||
Duration: -1,
|
||||
CreatorType: "local",
|
||||
CreatorRef: int64(0),
|
||||
},
|
||||
Level: robot.LEVELPROJECT,
|
||||
ProjectName: projectName,
|
||||
Level: robot.LEVELPROJECT,
|
||||
Permissions: []*robot.Permission{
|
||||
{
|
||||
Kind: "project",
|
||||
@ -1040,7 +1042,7 @@ func (bc *basicController) getScanTask(ctx context.Context, reportUUID string) (
|
||||
}
|
||||
|
||||
if len(tasks) == 0 {
|
||||
return nil, errors.NotFoundError(nil).WithMessage("task for report %s not found", reportUUID)
|
||||
return nil, errors.NotFoundError(nil).WithMessagef("task for report %s not found", reportUUID)
|
||||
}
|
||||
|
||||
return tasks[0], nil
|
||||
|
@ -82,7 +82,7 @@ type ControllerTestSuite struct {
|
||||
reportMgr *reporttesting.Manager
|
||||
ar artifact.Controller
|
||||
c *basicController
|
||||
reportConverter *postprocessorstesting.ScanReportV1ToV2Converter
|
||||
reportConverter *postprocessorstesting.NativeScanReportConverter
|
||||
cache *mockcache.Cache
|
||||
}
|
||||
|
||||
@ -235,8 +235,11 @@ func (suite *ControllerTestSuite) SetupSuite() {
|
||||
Description: "for scan",
|
||||
ProjectID: suite.artifact.ProjectID,
|
||||
Duration: -1,
|
||||
CreatorType: "local",
|
||||
CreatorRef: int64(0),
|
||||
},
|
||||
Level: robot.LEVELPROJECT,
|
||||
ProjectName: "library",
|
||||
Level: robot.LEVELPROJECT,
|
||||
Permissions: []*robot.Permission{
|
||||
{
|
||||
Kind: "project",
|
||||
@ -266,6 +269,8 @@ func (suite *ControllerTestSuite) SetupSuite() {
|
||||
Description: "for scan",
|
||||
ProjectID: suite.artifact.ProjectID,
|
||||
Duration: -1,
|
||||
CreatorType: "local",
|
||||
CreatorRef: int64(0),
|
||||
},
|
||||
Level: "project",
|
||||
}, nil)
|
||||
@ -339,7 +344,7 @@ func (suite *ControllerTestSuite) SetupSuite() {
|
||||
|
||||
execMgr: suite.execMgr,
|
||||
taskMgr: suite.taskMgr,
|
||||
reportConverter: &postprocessorstesting.ScanReportV1ToV2Converter{},
|
||||
reportConverter: &postprocessorstesting.NativeScanReportConverter{},
|
||||
cache: func() cache.Cache { return suite.cache },
|
||||
}
|
||||
mock.OnAnything(suite.scanHandler, "JobVendorType").Return("IMAGE_SCAN")
|
||||
@ -486,6 +491,7 @@ func (suite *ControllerTestSuite) TestScanControllerGetReport() {
|
||||
{ExtraAttrs: suite.makeExtraAttrs(int64(1), "rp-uuid-001")},
|
||||
}, nil).Once()
|
||||
mock.OnAnything(suite.accessoryMgr, "List").Return(nil, nil)
|
||||
mock.OnAnything(suite.c.reportConverter, "FromRelationalSchema").Return("", nil)
|
||||
rep, err := suite.c.GetReport(ctx, suite.artifact, []string{v1.MimeTypeNativeReport})
|
||||
require.NoError(suite.T(), err)
|
||||
assert.Equal(suite.T(), 1, len(rep))
|
||||
|
@ -51,7 +51,7 @@ type CallbackTestSuite struct {
|
||||
scanCtl Controller
|
||||
|
||||
taskMgr *tasktesting.Manager
|
||||
reportConverter *postprocessorstesting.ScanReportV1ToV2Converter
|
||||
reportConverter *postprocessorstesting.NativeScanReportConverter
|
||||
}
|
||||
|
||||
func (suite *CallbackTestSuite) SetupSuite() {
|
||||
@ -69,7 +69,7 @@ func (suite *CallbackTestSuite) SetupSuite() {
|
||||
suite.taskMgr = &tasktesting.Manager{}
|
||||
taskMgr = suite.taskMgr
|
||||
|
||||
suite.reportConverter = &postprocessorstesting.ScanReportV1ToV2Converter{}
|
||||
suite.reportConverter = &postprocessorstesting.NativeScanReportConverter{}
|
||||
|
||||
suite.scanCtl = &basicController{
|
||||
makeCtx: context.TODO,
|
||||
|
@ -98,7 +98,7 @@ func (bc *basicController) GetTotalOfRegistrations(ctx context.Context, query *q
|
||||
// CreateRegistration ...
|
||||
func (bc *basicController) CreateRegistration(ctx context.Context, registration *scanner.Registration) (string, error) {
|
||||
if isReservedName(registration.Name) {
|
||||
return "", errors.BadRequestError(nil).WithMessage(`name "%s" is reserved, please try a different name`, registration.Name)
|
||||
return "", errors.BadRequestError(nil).WithMessagef(`name "%s" is reserved, please try a different name`, registration.Name)
|
||||
}
|
||||
|
||||
// Check if the registration is available
|
||||
@ -168,7 +168,7 @@ func (bc *basicController) UpdateRegistration(ctx context.Context, registration
|
||||
}
|
||||
|
||||
if isReservedName(registration.Name) {
|
||||
return errors.BadRequestError(nil).WithMessage(`name "%s" is reserved, please try a different name`, registration.Name)
|
||||
return errors.BadRequestError(nil).WithMessagef(`name "%s" is reserved, please try a different name`, registration.Name)
|
||||
}
|
||||
|
||||
return bc.manager.Update(ctx, registration)
|
||||
@ -343,7 +343,7 @@ func (bc *basicController) GetMetadata(ctx context.Context, registrationUUID str
|
||||
}
|
||||
|
||||
if r == nil {
|
||||
return nil, errors.NotFoundError(nil).WithMessage("registration %s not found", registrationUUID)
|
||||
return nil, errors.NotFoundError(nil).WithMessagef("registration %s not found", registrationUUID)
|
||||
}
|
||||
|
||||
return bc.Ping(ctx, r)
|
||||
@ -402,7 +402,7 @@ type MetadataResult struct {
|
||||
func (m *MetadataResult) Unpack() (*v1.ScannerAdapterMetadata, error) {
|
||||
var err error
|
||||
if m.Error != "" {
|
||||
err = fmt.Errorf(m.Error)
|
||||
err = errors.New(nil).WithMessage(m.Error)
|
||||
}
|
||||
|
||||
return m.Metadata, err
|
||||
|
@ -147,11 +147,11 @@ func (c *controller) attachTags(ctx context.Context, vuls []*secHubModel.Vulnera
|
||||
}
|
||||
|
||||
// get tags in the artifact list
|
||||
var artifactIds []interface{}
|
||||
var artifactIDs []interface{}
|
||||
for k := range artifactTagMap {
|
||||
artifactIds = append(artifactIds, k)
|
||||
artifactIDs = append(artifactIDs, k)
|
||||
}
|
||||
query := q.New(q.KeyWords{"artifact_id": q.NewOrList(artifactIds)})
|
||||
query := q.New(q.KeyWords{"artifact_id": q.NewOrList(artifactIDs)})
|
||||
tags, err := c.tagMgr.List(ctx, query)
|
||||
if err != nil {
|
||||
return vuls, err
|
||||
|
@ -44,7 +44,7 @@ type ControllerTestSuite struct {
|
||||
c *controller
|
||||
scannerMgr *scannerMock.Manager
|
||||
secHubMgr *securityMock.Manager
|
||||
tagMgr *tagMock.FakeManager
|
||||
tagMgr *tagMock.Manager
|
||||
}
|
||||
|
||||
// TestController is the entry of controller test suite
|
||||
@ -56,7 +56,7 @@ func TestController(t *testing.T) {
|
||||
func (suite *ControllerTestSuite) SetupTest() {
|
||||
suite.secHubMgr = &securityMock.Manager{}
|
||||
suite.scannerMgr = &scannerMock.Manager{}
|
||||
suite.tagMgr = &tagMock.FakeManager{}
|
||||
suite.tagMgr = &tagMock.Manager{}
|
||||
|
||||
suite.c = &controller{
|
||||
secHubMgr: suite.secHubMgr,
|
||||
|
@ -97,7 +97,7 @@ func (c *controller) Ensure(ctx context.Context, repositoryID, artifactID int64,
|
||||
// existing tag must check the immutable status and signature
|
||||
if tag.Immutable {
|
||||
return 0, errors.New(nil).WithCode(errors.PreconditionCode).
|
||||
WithMessage("the tag %s configured as immutable, cannot be updated", tag.Name)
|
||||
WithMessagef("the tag %s configured as immutable, cannot be updated", tag.Name)
|
||||
}
|
||||
// the tag exists under the repository, but it is attached to other artifact
|
||||
// update it to point to the provided artifact
|
||||
@ -189,7 +189,7 @@ func (c *controller) Delete(ctx context.Context, id int64) (err error) {
|
||||
}
|
||||
if tag.Immutable {
|
||||
return errors.New(nil).WithCode(errors.PreconditionCode).
|
||||
WithMessage("the tag %s configured as immutable, cannot be deleted", tag.Name)
|
||||
WithMessagef("the tag %s configured as immutable, cannot be deleted", tag.Name)
|
||||
}
|
||||
return c.tagMgr.Delete(ctx, id)
|
||||
}
|
||||
|
@ -18,7 +18,6 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/goharbor/harbor/src/lib/errors"
|
||||
@ -27,6 +26,7 @@ import (
|
||||
_ "github.com/goharbor/harbor/src/pkg/config/inmemory"
|
||||
"github.com/goharbor/harbor/src/pkg/tag/model/tag"
|
||||
ormtesting "github.com/goharbor/harbor/src/testing/lib/orm"
|
||||
"github.com/goharbor/harbor/src/testing/mock"
|
||||
"github.com/goharbor/harbor/src/testing/pkg/artifact"
|
||||
"github.com/goharbor/harbor/src/testing/pkg/immutable"
|
||||
"github.com/goharbor/harbor/src/testing/pkg/repository"
|
||||
@ -38,14 +38,14 @@ type controllerTestSuite struct {
|
||||
ctl *controller
|
||||
repoMgr *repository.Manager
|
||||
artMgr *artifact.Manager
|
||||
tagMgr *tagtesting.FakeManager
|
||||
tagMgr *tagtesting.Manager
|
||||
immutableMtr *immutable.FakeMatcher
|
||||
}
|
||||
|
||||
func (c *controllerTestSuite) SetupTest() {
|
||||
c.repoMgr = &repository.Manager{}
|
||||
c.artMgr = &artifact.Manager{}
|
||||
c.tagMgr = &tagtesting.FakeManager{}
|
||||
c.tagMgr = &tagtesting.Manager{}
|
||||
c.immutableMtr = &immutable.FakeMatcher{}
|
||||
c.ctl = &controller{
|
||||
tagMgr: c.tagMgr,
|
||||
@ -56,7 +56,7 @@ func (c *controllerTestSuite) SetupTest() {
|
||||
|
||||
func (c *controllerTestSuite) TestEnsureTag() {
|
||||
// the tag already exists under the repository and is attached to the artifact
|
||||
c.tagMgr.On("List").Return([]*tag.Tag{
|
||||
c.tagMgr.On("List", mock.Anything, mock.Anything).Return([]*tag.Tag{
|
||||
{
|
||||
ID: 1,
|
||||
RepositoryID: 1,
|
||||
@ -67,7 +67,7 @@ func (c *controllerTestSuite) TestEnsureTag() {
|
||||
c.artMgr.On("Get", mock.Anything, mock.Anything).Return(&pkg_artifact.Artifact{
|
||||
ID: 1,
|
||||
}, nil)
|
||||
c.immutableMtr.On("Match").Return(false, nil)
|
||||
mock.OnAnything(c.immutableMtr, "Match").Return(false, nil)
|
||||
_, err := c.ctl.Ensure(orm.NewContext(nil, &ormtesting.FakeOrmer{}), 1, 1, "latest")
|
||||
c.Require().Nil(err)
|
||||
c.tagMgr.AssertExpectations(c.T())
|
||||
@ -76,7 +76,7 @@ func (c *controllerTestSuite) TestEnsureTag() {
|
||||
c.SetupTest()
|
||||
|
||||
// the tag exists under the repository, but it is attached to other artifact
|
||||
c.tagMgr.On("List").Return([]*tag.Tag{
|
||||
c.tagMgr.On("List", mock.Anything, mock.Anything).Return([]*tag.Tag{
|
||||
{
|
||||
ID: 1,
|
||||
RepositoryID: 1,
|
||||
@ -84,11 +84,11 @@ func (c *controllerTestSuite) TestEnsureTag() {
|
||||
Name: "latest",
|
||||
},
|
||||
}, nil)
|
||||
c.tagMgr.On("Update").Return(nil)
|
||||
c.tagMgr.On("Update", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil)
|
||||
c.artMgr.On("Get", mock.Anything, mock.Anything).Return(&pkg_artifact.Artifact{
|
||||
ID: 1,
|
||||
}, nil)
|
||||
c.immutableMtr.On("Match").Return(false, nil)
|
||||
mock.OnAnything(c.immutableMtr, "Match").Return(false, nil)
|
||||
_, err = c.ctl.Ensure(orm.NewContext(nil, &ormtesting.FakeOrmer{}), 1, 1, "latest")
|
||||
c.Require().Nil(err)
|
||||
c.tagMgr.AssertExpectations(c.T())
|
||||
@ -97,26 +97,26 @@ func (c *controllerTestSuite) TestEnsureTag() {
|
||||
c.SetupTest()
|
||||
|
||||
// the tag doesn't exist under the repository, create it
|
||||
c.tagMgr.On("List").Return([]*tag.Tag{}, nil)
|
||||
c.tagMgr.On("Create").Return(1, nil)
|
||||
c.tagMgr.On("List", mock.Anything, mock.Anything).Return([]*tag.Tag{}, nil)
|
||||
c.tagMgr.On("Create", mock.Anything, mock.Anything).Return(int64(1), nil)
|
||||
c.artMgr.On("Get", mock.Anything, mock.Anything).Return(&pkg_artifact.Artifact{
|
||||
ID: 1,
|
||||
}, nil)
|
||||
c.immutableMtr.On("Match").Return(false, nil)
|
||||
mock.OnAnything(c.immutableMtr, "Match").Return(false, nil)
|
||||
_, err = c.ctl.Ensure(orm.NewContext(nil, &ormtesting.FakeOrmer{}), 1, 1, "latest")
|
||||
c.Require().Nil(err)
|
||||
c.tagMgr.AssertExpectations(c.T())
|
||||
}
|
||||
|
||||
func (c *controllerTestSuite) TestCount() {
|
||||
c.tagMgr.On("Count").Return(1, nil)
|
||||
c.tagMgr.On("Count", mock.Anything, mock.Anything).Return(int64(1), nil)
|
||||
total, err := c.ctl.Count(nil, nil)
|
||||
c.Require().Nil(err)
|
||||
c.Equal(int64(1), total)
|
||||
}
|
||||
|
||||
func (c *controllerTestSuite) TestList() {
|
||||
c.tagMgr.On("List").Return([]*tag.Tag{
|
||||
c.tagMgr.On("List", mock.Anything, mock.Anything).Return([]*tag.Tag{
|
||||
{
|
||||
RepositoryID: 1,
|
||||
Name: "testlist",
|
||||
@ -134,7 +134,7 @@ func (c *controllerTestSuite) TestGet() {
|
||||
getTest.RepositoryID = 1
|
||||
getTest.Name = "testget"
|
||||
|
||||
c.tagMgr.On("Get").Return(getTest, nil)
|
||||
c.tagMgr.On("Get", mock.Anything, mock.Anything).Return(getTest, nil)
|
||||
tag, err := c.ctl.Get(nil, 1, nil)
|
||||
c.Require().Nil(err)
|
||||
c.tagMgr.AssertExpectations(c.T())
|
||||
@ -143,36 +143,36 @@ func (c *controllerTestSuite) TestGet() {
|
||||
}
|
||||
|
||||
func (c *controllerTestSuite) TestDelete() {
|
||||
c.tagMgr.On("Get").Return(&tag.Tag{
|
||||
c.tagMgr.On("Get", mock.Anything, mock.Anything).Return(&tag.Tag{
|
||||
RepositoryID: 1,
|
||||
Name: "test",
|
||||
}, nil)
|
||||
c.artMgr.On("Get", mock.Anything, mock.Anything).Return(&pkg_artifact.Artifact{
|
||||
ID: 1,
|
||||
}, nil)
|
||||
c.immutableMtr.On("Match").Return(false, nil)
|
||||
c.tagMgr.On("Delete").Return(nil)
|
||||
mock.OnAnything(c.immutableMtr, "Match").Return(false, nil)
|
||||
c.tagMgr.On("Delete", mock.Anything, mock.Anything).Return(nil)
|
||||
err := c.ctl.Delete(nil, 1)
|
||||
c.Require().Nil(err)
|
||||
}
|
||||
|
||||
func (c *controllerTestSuite) TestDeleteImmutable() {
|
||||
c.tagMgr.On("Get").Return(&tag.Tag{
|
||||
c.tagMgr.On("Get", mock.Anything, mock.Anything).Return(&tag.Tag{
|
||||
RepositoryID: 1,
|
||||
Name: "test",
|
||||
}, nil)
|
||||
c.artMgr.On("Get", mock.Anything, mock.Anything).Return(&pkg_artifact.Artifact{
|
||||
ID: 1,
|
||||
}, nil)
|
||||
c.immutableMtr.On("Match").Return(true, nil)
|
||||
c.tagMgr.On("Delete").Return(nil)
|
||||
mock.OnAnything(c.immutableMtr, "Match").Return(true, nil)
|
||||
c.tagMgr.On("Delete", mock.Anything, mock.Anything).Return(nil)
|
||||
err := c.ctl.Delete(nil, 1)
|
||||
c.Require().NotNil(err)
|
||||
c.True(errors.IsErr(err, errors.PreconditionCode))
|
||||
}
|
||||
|
||||
func (c *controllerTestSuite) TestUpdate() {
|
||||
c.tagMgr.On("Update").Return(nil)
|
||||
mock.OnAnything(c.tagMgr, "Update").Return(nil)
|
||||
err := c.ctl.Update(nil, &Tag{
|
||||
Tag: tag.Tag{
|
||||
RepositoryID: 1,
|
||||
@ -184,14 +184,14 @@ func (c *controllerTestSuite) TestUpdate() {
|
||||
}
|
||||
|
||||
func (c *controllerTestSuite) TestDeleteTags() {
|
||||
c.tagMgr.On("Get").Return(&tag.Tag{
|
||||
c.tagMgr.On("Get", mock.Anything, mock.Anything).Return(&tag.Tag{
|
||||
RepositoryID: 1,
|
||||
}, nil)
|
||||
c.artMgr.On("Get", mock.Anything, mock.Anything).Return(&pkg_artifact.Artifact{
|
||||
ID: 1,
|
||||
}, nil)
|
||||
c.immutableMtr.On("Match").Return(false, nil)
|
||||
c.tagMgr.On("Delete").Return(nil)
|
||||
mock.OnAnything(c.immutableMtr, "Match").Return(false, nil)
|
||||
c.tagMgr.On("Delete", mock.Anything, mock.Anything).Return(nil)
|
||||
ids := []int64{1, 2, 3, 4}
|
||||
err := c.ctl.DeleteTags(nil, ids)
|
||||
c.Require().Nil(err)
|
||||
@ -218,7 +218,7 @@ func (c *controllerTestSuite) TestAssembleTag() {
|
||||
}
|
||||
|
||||
c.artMgr.On("Get", mock.Anything, mock.Anything).Return(art, nil)
|
||||
c.immutableMtr.On("Match").Return(true, nil)
|
||||
mock.OnAnything(c.immutableMtr, "Match").Return(true, nil)
|
||||
tag := c.ctl.assembleTag(nil, tg, option)
|
||||
c.Require().NotNil(tag)
|
||||
c.Equal(tag.ID, tg.ID)
|
||||
|
@ -178,17 +178,17 @@ func (c *controller) Count(ctx context.Context, query *q.Query) (int64, error) {
|
||||
func (c *controller) Delete(ctx context.Context, id int) error {
|
||||
// cleanup project member with the user
|
||||
if err := c.memberMgr.DeleteMemberByUserID(ctx, id); err != nil {
|
||||
return errors.UnknownError(err).WithMessage("delete user failed, user id: %v, cannot delete project user member, error:%v", id, err)
|
||||
return errors.UnknownError(err).WithMessagef("delete user failed, user id: %v, cannot delete project user member, error:%v", id, err)
|
||||
}
|
||||
// delete oidc metadata under the user
|
||||
if lib.GetAuthMode(ctx) == common.OIDCAuth {
|
||||
if err := c.oidcMetaMgr.DeleteByUserID(ctx, id); err != nil {
|
||||
return errors.UnknownError(err).WithMessage("delete user failed, user id: %v, cannot delete oidc user, error:%v", id, err)
|
||||
return errors.UnknownError(err).WithMessagef("delete user failed, user id: %v, cannot delete oidc user, error:%v", id, err)
|
||||
}
|
||||
}
|
||||
gdprSetting, err := config.GDPRSetting(ctx)
|
||||
if err != nil {
|
||||
return errors.UnknownError(err).WithMessage("failed to load GDPR setting: %v", err)
|
||||
return errors.UnknownError(err).WithMessagef("failed to load GDPR setting: %v", err)
|
||||
}
|
||||
|
||||
if gdprSetting.AuditLogs {
|
||||
|
@ -81,7 +81,7 @@ func (c *controller) Update(ctx context.Context, id int, groupName string) error
|
||||
return err
|
||||
}
|
||||
if len(ug) == 0 {
|
||||
return errors.NotFoundError(nil).WithMessage("the user group with id %v is not found", id)
|
||||
return errors.NotFoundError(nil).WithMessagef("the user group with id %v is not found", id)
|
||||
}
|
||||
return c.mgr.UpdateName(ctx, id, groupName)
|
||||
}
|
||||
@ -90,10 +90,10 @@ func (c *controller) Create(ctx context.Context, group model.UserGroup) (int, er
|
||||
if group.GroupType == common.LDAPGroupType {
|
||||
ldapGroup, err := auth.SearchGroup(ctx, group.LdapGroupDN)
|
||||
if err == ldap.ErrNotFound || ldapGroup == nil {
|
||||
return 0, errors.BadRequestError(nil).WithMessage("LDAP Group DN is not found: DN:%v", group.LdapGroupDN)
|
||||
return 0, errors.BadRequestError(nil).WithMessagef("LDAP Group DN is not found: DN:%v", group.LdapGroupDN)
|
||||
}
|
||||
if err == ldap.ErrDNSyntax {
|
||||
return 0, errors.BadRequestError(nil).WithMessage("invalid DN syntax. DN: %v", group.LdapGroupDN)
|
||||
return 0, errors.BadRequestError(nil).WithMessagef("invalid DN syntax. DN: %v", group.LdapGroupDN)
|
||||
}
|
||||
if err != nil {
|
||||
return 0, err
|
||||
@ -102,7 +102,7 @@ func (c *controller) Create(ctx context.Context, group model.UserGroup) (int, er
|
||||
id, err := c.mgr.Create(ctx, group)
|
||||
if err != nil && err == usergroup.ErrDupUserGroup {
|
||||
return 0, errors.ConflictError(nil).
|
||||
WithMessage("duplicate user group, group name:%v, group type: %v, ldap group DN: %v",
|
||||
WithMessagef("duplicate user group, group name:%v, group type: %v, ldap group DN: %v",
|
||||
group.GroupName, group.GroupType, group.LdapGroupDN)
|
||||
}
|
||||
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user