Mirror of https://github.com/goharbor/harbor.git (synced 2024-11-25 11:46:43 +01:00)

Merge branch 'main' into fix/invalid-repo-artifacts-404
Commit e3beda9a3d

.github/workflows/CI.yml (vendored) | 36 changed lines

@@ -5,7 +5,7 @@ env:
  POSTGRESQL_USR: postgres
  POSTGRESQL_PWD: root123
  POSTGRESQL_DATABASE: registry
- DOCKER_COMPOSE_VERSION: 1.23.0
+ DOCKER_COMPOSE_VERSION: 2.27.1
  HARBOR_ADMIN: admin
  HARBOR_ADMIN_PASSWD: Harbor12345
  CORE_SECRET: tempString
@@ -41,10 +41,10 @@ jobs:
      - ubuntu-latest
    timeout-minutes: 100
    steps:
-     - name: Set up Go 1.21
+     - name: Set up Go 1.22
        uses: actions/setup-go@v5
        with:
-         go-version: 1.21.5
+         go-version: 1.22.3
        id: go
      - uses: actions/checkout@v3
        with:
@@ -66,7 +66,7 @@ jobs:
          env
          #sudo apt install -y xvfb
          #xvfb-run ls
-         curl -L https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > docker-compose
+         curl -L https://github.com/docker/compose/releases/download/v${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > docker-compose
          chmod +x docker-compose
          sudo mv docker-compose /usr/local/bin
          IP=`hostname -I | awk '{print $1}'`
@@ -89,7 +89,7 @@ jobs:
          bash ./tests/showtime.sh ./tests/ci/ut_run.sh $IP
          df -h
      - name: Codecov For BackEnd
-       uses: codecov/codecov-action@v3
+       uses: codecov/codecov-action@v4
        with:
          file: ./src/github.com/goharbor/harbor/profile.cov
          flags: unittests
@@ -102,10 +102,10 @@ jobs:
      - ubuntu-latest
    timeout-minutes: 100
    steps:
-     - name: Set up Go 1.21
+     - name: Set up Go 1.22
        uses: actions/setup-go@v5
        with:
-         go-version: 1.21.5
+         go-version: 1.22.3
        id: go
      - uses: actions/checkout@v3
        with:
@@ -131,7 +131,7 @@ jobs:
          df -h
          #sudo apt install -y xvfb
          #xvfb-run ls
-         curl -L https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > docker-compose
+         curl -L https://github.com/docker/compose/releases/download/v${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > docker-compose
          chmod +x docker-compose
          sudo mv docker-compose /usr/local/bin
      - name: install
@@ -157,10 +157,10 @@ jobs:
      - ubuntu-latest
    timeout-minutes: 100
    steps:
-     - name: Set up Go 1.21
+     - name: Set up Go 1.22
        uses: actions/setup-go@v5
        with:
-         go-version: 1.21.5
+         go-version: 1.22.3
        id: go
      - uses: actions/checkout@v3
        with:
@@ -186,7 +186,7 @@ jobs:
          df -h
          #sudo apt install -y xvfb
          #xvfb-run ls
-         curl -L https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > docker-compose
+         curl -L https://github.com/docker/compose/releases/download/v${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > docker-compose
          chmod +x docker-compose
          sudo mv docker-compose /usr/local/bin
      - name: install
@@ -212,10 +212,10 @@ jobs:
      - ubuntu-latest
    timeout-minutes: 100
    steps:
-     - name: Set up Go 1.21
+     - name: Set up Go 1.22
        uses: actions/setup-go@v5
        with:
-         go-version: 1.21.5
+         go-version: 1.22.3
        id: go
      - uses: actions/checkout@v3
        with:
@@ -240,7 +240,7 @@ jobs:
          df -h
          #sudo apt install -y xvfb
          #xvfb-run ls
-         curl -L https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > docker-compose
+         curl -L https://github.com/docker/compose/releases/download/v${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > docker-compose
          chmod +x docker-compose
          sudo mv docker-compose /usr/local/bin
      - name: install
@@ -265,10 +265,10 @@ jobs:
      - ubuntu-latest
    timeout-minutes: 100
    steps:
-     - name: Set up Go 1.21
+     - name: Set up Go 1.22
        uses: actions/setup-go@v5
        with:
-         go-version: 1.21.5
+         go-version: 1.22.3
        id: go
      - uses: actions/checkout@v3
        with:
@@ -292,7 +292,7 @@ jobs:
          df -h
          #sudo apt install -y xvfb
          #xvfb-run ls
-         curl -L https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > docker-compose
+         curl -L https://github.com/docker/compose/releases/download/v${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > docker-compose
          chmod +x docker-compose
          sudo mv docker-compose /usr/local/bin
          IP=`hostname -I | awk '{print $1}'`
@@ -331,7 +331,7 @@ jobs:
          bash ./tests/showtime.sh ./tests/ci/ui_ut_run.sh
          df -h
      - name: Codecov For UI
-       uses: codecov/codecov-action@v3
+       uses: codecov/codecov-action@v4
        with:
          file: ./src/github.com/goharbor/harbor/src/portal/coverage/lcov.info
          flags: unittests

.github/workflows/auto_assign_prs.yml (vendored) | 2 changed lines

@@ -13,6 +13,6 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Set the author of a PR as the assignee
-       uses: kentaro-m/auto-assign-action@v1.2.6
+       uses: kentaro-m/auto-assign-action@v2.0.0
        with:
          configuration-path: ".github/auto-assignees.yml"

.github/workflows/build-package.yml (vendored) | 4 changed lines

@@ -23,10 +23,10 @@ jobs:
        with:
          version: '430.0.0'
      - run: gcloud info
-     - name: Set up Go 1.21
+     - name: Set up Go 1.22
        uses: actions/setup-go@v5
        with:
-         go-version: 1.21.5
+         go-version: 1.22.3
        id: go
      - name: Setup Docker
        uses: docker-practice/actions-setup-docker@master

.github/workflows/codeql-analysis.yml (vendored) | 3 changed lines

@@ -47,5 +47,8 @@ jobs:
    # make bootstrap
    # make release

+   # to make sure autobuild success, specifify golang version in go.mod
+   # https://github.com/github/codeql/issues/15647#issuecomment-2003768106
+
    - name: Perform CodeQL Analysis
      uses: github/codeql-action/analyze@v3

.github/workflows/conformance_test.yml (vendored) | 2 changed lines

@@ -28,7 +28,7 @@ jobs:
      - name: Set up Go 1.21
        uses: actions/setup-go@v5
        with:
-         go-version: 1.21.5
+         go-version: 1.22.3
        id: go
      - uses: actions/checkout@v3
        with:

.github/workflows/nightly-trivy-scan.yml (vendored) | 2 changed lines

@@ -12,7 +12,7 @@ jobs:
      matrix:
        # maintain the versions of harbor that need to be actively
        # security scanned
-       versions: [dev, v2.10.0-dev]
+       versions: [dev, v2.11.0-dev]
        # list of images that need to be scanned
        images: [harbor-core, harbor-db, harbor-exporter, harbor-jobservice, harbor-log, harbor-portal, harbor-registryctl, prepare]
    permissions:

.github/workflows/publish_release.yml (vendored) | 6 changed lines

@@ -60,7 +60,7 @@ jobs:
          docker load -i ./harbor/harbor.${{ env.BASE_TAG }}.tar.gz
          images="$(docker images --format "{{.Repository}}" --filter=reference='goharbor/*:${{ env.BASE_TAG }}' | xargs)"
          source tools/release/release_utils.sh
-         publishImages ${{ env.CUR_TAG }} ${{ env.BASE_TAG }} ${{ secrets.DOCKER_HUB_USERNAME }} ${{ secrets.DOCKER_HUB_PASSWORD }} $images
+         publishImages ${{ env.CUR_TAG }} ${{ env.BASE_TAG }} "${{ secrets.DOCKER_HUB_USERNAME }}" "${{ secrets.DOCKER_HUB_PASSWORD }}" $images
          publishPackages ${{ env.CUR_TAG }} ${{ env.BASE_TAG }} ${{ github.actor }} ${{ secrets.GITHUB_TOKEN }} $images
      - name: Generate release notes
        run: |
@@ -68,7 +68,7 @@ jobs:
          source tools/release/release_utils.sh && generateReleaseNotes ${{ env.CUR_TAG }} ${{ env.PRE_TAG }} ${{ secrets.GITHUB_TOKEN }} $release_notes_path
          echo "RELEASE_NOTES_PATH=$release_notes_path" >> $GITHUB_ENV
      - name: RC Release
-       uses: softprops/action-gh-release@v1
+       uses: softprops/action-gh-release@v2
        if: ${{ env.PRERELEASE == 'true' }}
        with:
          body_path: ${{ env.RELEASE_NOTES_PATH }}
@@ -77,7 +77,7 @@ jobs:
            ${{ env.OFFLINE_PACKAGE_PATH }}.asc
            ${{ env.MD5SUM_PATH }}
      - name: GA Release
-       uses: softprops/action-gh-release@v1
+       uses: softprops/action-gh-release@v2
        if: ${{ env.PRERELEASE == 'false' }}
        with:
          body_path: ${{ env.RELEASE_NOTES_PATH }}

.gitignore (vendored) | 2 changed lines

@@ -56,3 +56,5 @@ src/server/v2.0/models/
 src/server/v2.0/restapi/
 .editorconfig

+harborclient/
+openapi-generator-cli.jar

@@ -164,7 +164,8 @@ Harbor backend is written in [Go](http://golang.org/). If you don't have a Harbo
 | 2.7  | 1.19.4 |
 | 2.8  | 1.20.6 |
 | 2.9  | 1.21.3 |
-| 2.10 | 1.21.5 |
+| 2.10 | 1.21.8 |
+| 2.11 | 1.22.3 |


 Ensure your GOPATH and PATH have been configured in accordance with the Go environment instructions.
@@ -275,7 +276,7 @@ To build the code, please refer to [build](https://goharbor.io/docs/edge/build-c

 **Note**: from v2.0, Harbor uses [go-swagger](https://github.com/go-swagger/go-swagger) to generate API server from Swagger 2.0 (aka [OpenAPI 2.0](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md)). To add or change the APIs, first update the `api/v2.0/swagger.yaml` file, then run `make gen_apis` to generate the API server, finally, implement or update the API handlers in `src/server/v2.0/handler` package.

-As now Harbor uses `controller/manager/dao` programming model, we suggest to use [testify mock](https://github.com/stretchr/testify/blob/master/mock/doc.go) to test `controller` and `manager`. Harbor integrates [mockery](https://github.com/vektra/mockery) to generate mocks for golang interfaces using the testify mock package. To generate mocks for the interface, first add `//go:generate mockery xxx` comment with mockery command in the subpackages of `src/testing`, then run `make gen_mocks` to generate mocks.
+As now Harbor uses `controller/manager/dao` programming model, we suggest to use [testify mock](https://github.com/stretchr/testify/blob/master/mock/doc.go) to test `controller` and `manager`. Harbor integrates [mockery](https://github.com/vektra/mockery) to generate mocks for golang interfaces using the testify mock package. To generate mocks for the interface, first add mock config in the `src/.mockery.yaml`, then run `make gen_mocks` to generate mocks.

 ### Keep sync with upstream
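
The contributing-guide change above replaces per-package `//go:generate` comments with one central `src/.mockery.yaml` config. A minimal sketch of the resulting workflow, assuming a checked-out harbor tree (the make targets and file paths are from the text above; the final test invocation is illustrative):

# 1. edit the API spec, then regenerate the go-swagger server code
vi api/v2.0/swagger.yaml
make gen_apis

# 2. declare the interface to mock in src/.mockery.yaml, then regenerate mocks
make gen_mocks

# 3. assumption: re-run the affected unit tests afterwards
go test ./src/server/v2.0/handler/...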

Makefile | 16 changed lines

@@ -104,11 +104,13 @@ PREPARE_VERSION_NAME=versions

 #versions
 REGISTRYVERSION=v2.8.3-patch-redis
-TRIVYVERSION=v0.47.0
-TRIVYADAPTERVERSION=v0.30.19
+TRIVYVERSION=v0.51.2
+TRIVYADAPTERVERSION=v0.31.2

 # version of registry for pulling the source code
 REGISTRY_SRC_TAG=v2.8.3
+# source of upstream distribution code
+DISTRIBUTION_SRC=https://github.com/distribution/distribution.git

 # dependency binaries
 REGISTRYURL=https://storage.googleapis.com/harbor-builds/bin/registry/release-${REGISTRYVERSION}/registry
@@ -140,7 +142,7 @@ GOINSTALL=$(GOCMD) install
 GOTEST=$(GOCMD) test
 GODEP=$(GOTEST) -i
 GOFMT=gofmt -w
-GOBUILDIMAGE=golang:1.21.5
+GOBUILDIMAGE=golang:1.22.3
 GOBUILDPATHINCONTAINER=/harbor

 # go build
@@ -312,13 +314,13 @@ gen_apis: lint_apis


 MOCKERY_IMAGENAME=$(IMAGENAMESPACE)/mockery
-MOCKERY_VERSION=v2.35.4
-MOCKERY=$(RUNCONTAINER) ${MOCKERY_IMAGENAME}:${MOCKERY_VERSION}
+MOCKERY_VERSION=v2.43.2
+MOCKERY=$(RUNCONTAINER)/src ${MOCKERY_IMAGENAME}:${MOCKERY_VERSION}
 MOCKERY_IMAGE_BUILD_CMD=${DOCKERBUILD} -f ${TOOLSPATH}/mockery/Dockerfile --build-arg GOLANG=${GOBUILDIMAGE} --build-arg MOCKERY_VERSION=${MOCKERY_VERSION} -t ${MOCKERY_IMAGENAME}:$(MOCKERY_VERSION) .

 gen_mocks:
 	$(call prepare_docker_image,${MOCKERY_IMAGENAME},${MOCKERY_VERSION},${MOCKERY_IMAGE_BUILD_CMD})
-	${MOCKERY} go generate ./...
+	${MOCKERY} mockery

 mocks_check: gen_mocks
 	@echo checking mocks...
@@ -388,7 +390,7 @@ build:
 		exit 1; \
 	fi
 	make -f $(MAKEFILEPATH_PHOTON)/Makefile $(BUILDTARGET) -e DEVFLAG=$(DEVFLAG) -e GOBUILDIMAGE=$(GOBUILDIMAGE) \
-	-e REGISTRYVERSION=$(REGISTRYVERSION) -e REGISTRY_SRC_TAG=$(REGISTRY_SRC_TAG) \
+	-e REGISTRYVERSION=$(REGISTRYVERSION) -e REGISTRY_SRC_TAG=$(REGISTRY_SRC_TAG) -e DISTRIBUTION_SRC=$(DISTRIBUTION_SRC)\
 	-e TRIVYVERSION=$(TRIVYVERSION) -e TRIVYADAPTERVERSION=$(TRIVYADAPTERVERSION) \
 	-e VERSIONTAG=$(VERSIONTAG) \
 	-e BUILDBIN=$(BUILDBIN) \

@@ -16,9 +16,9 @@ Patch releases are based on the major/minor release branch, the release cadence

 ### Minor Release Support Matrix
 | Version        | Supported          |
 |----------------| ------------------ |
+| Harbor v2.11.x | :white_check_mark: |
 | Harbor v2.10.x | :white_check_mark: |
 | Harbor v2.9.x  | :white_check_mark: |
-| Harbor v2.8.x  | :white_check_mark: |

 ### Upgrade path and support policy
 The upgrade path for Harbor is (1) 2.2.x patch releases are always compatible with its major and minor version. For example, previous released 2.2.x can be upgraded to most recent 2.2.3 release. (2) Harbor only supports two previous minor releases to upgrade to current minor release. For example, 2.3.0 will only support 2.1.0 and 2.2.0 to upgrade from, 2.0.0 to 2.3.0 is not supported. One should upgrade to 2.2.0 first, then to 2.3.0.

@@ -991,6 +991,12 @@ paths:
        type: boolean
        required: false
        default: false
+     - name: with_sbom_overview
+       in: query
+       description: Specify whether the SBOM overview is included in returning artifacts, when this option is true, the SBOM overview will be included in the response
+       type: boolean
+       required: false
+       default: false
      - name: with_signature
        in: query
        description: Specify whether the signature is included inside the tags of the returning artifacts. Only works when setting "with_tag=true"
@@ -1096,6 +1102,12 @@ paths:
        type: boolean
        required: false
        default: false
+     - name: with_sbom_overview
+       in: query
+       description: Specify whether the SBOM overview is included in returning artifact, when this option is true, the SBOM overview will be included in the response
+       type: boolean
+       required: false
+       default: false
      - name: with_accessory
        in: query
        description: Specify whether the accessories are included of the returning artifacts.
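
The two hunks above add the same `with_sbom_overview` query parameter to both artifact-listing endpoints. A hedged sketch of how a client might use it (host, project, repository and credentials are placeholders):

# ask Harbor to include the SBOM overview for each returned artifact
curl -s -u "admin:Harbor12345" \
  "https://harbor.example.com/api/v2.0/projects/library/repositories/nginx/artifacts?with_sbom_overview=true"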

@@ -1164,6 +1176,11 @@ paths:
        - $ref: '#/parameters/projectName'
        - $ref: '#/parameters/repositoryName'
        - $ref: '#/parameters/reference'
+       - name: scanType
+         in: body
+         required: false
+         schema:
+           $ref: '#/definitions/ScanType'
      responses:
        '202':
          $ref: '#/responses/202'
@@ -1175,6 +1192,8 @@
          $ref: '#/responses/403'
        '404':
          $ref: '#/responses/404'
+       '422':
+         $ref: '#/responses/422'
        '500':
          $ref: '#/responses/500'
  /projects/{project_name}/repositories/{repository_name}/artifacts/{reference}/scan/stop:
@@ -1189,6 +1208,12 @@
        - $ref: '#/parameters/projectName'
        - $ref: '#/parameters/repositoryName'
        - $ref: '#/parameters/reference'
+       - name: scanType
+         in: body
+         required: true
+         schema:
+           $ref: '#/definitions/ScanType'
+         description: 'The scan type: Vulnerabilities, SBOM'
      responses:
        '202':
          $ref: '#/responses/202'
@@ -1200,6 +1225,8 @@
          $ref: '#/responses/403'
        '404':
          $ref: '#/responses/404'
+       '422':
+         $ref: '#/responses/422'
        '500':
          $ref: '#/responses/500'
  /projects/{project_name}/repositories/{repository_name}/artifacts/{reference}/scan/{report_id}/log:
@@ -1226,6 +1253,8 @@
          description: Successfully get scan log file
          schema:
            type: string
+       '400':
+         $ref: '#/responses/400'
        '401':
          $ref: '#/responses/401'
        '403':
@@ -1432,7 +1461,7 @@
        in: path
        description: The type of addition.
        type: string
-       enum: [build_history, values.yaml, readme.md, dependencies]
+       enum: [build_history, values.yaml, readme.md, dependencies, sbom]
        required: true
      responses:
        '200':
@@ -1451,6 +1480,8 @@
          $ref: '#/responses/403'
        '404':
          $ref: '#/responses/404'
+       '422':
+         $ref: '#/responses/422'
        '500':
          $ref: '#/responses/500'
  /projects/{project_name}/repositories/{repository_name}/artifacts/{reference}/labels:
@@ -4798,6 +4829,8 @@
          $ref: '#/responses/403'
        '404':
          $ref: '#/responses/404'
+       '422':
+         $ref: '#/responses/422'
        '500':
          $ref: '#/responses/500'
  /schedules:
@@ -6431,6 +6464,14 @@ responses:
        type: string
    schema:
      $ref: '#/definitions/Errors'
+ '422':
+   description: Unsupported Type
+   headers:
+     X-Request-Id:
+       description: The ID of the corresponding request for the response
+       type: string
+   schema:
+     $ref: '#/definitions/Errors'
  '500':
    description: Internal server error
    headers:
@@ -6592,6 +6633,9 @@ definitions:
      scan_overview:
        $ref: '#/definitions/ScanOverview'
        description: The overview of the scan result.
+     sbom_overview:
+       $ref: '#/definitions/SBOMOverview'
+       description: The overview of the generating SBOM progress
      accessories:
        type: array
        items:
@@ -6743,6 +6787,37 @@ definitions:
    description: 'The scan overview attached in the metadata of tag'
    additionalProperties:
      $ref: '#/definitions/NativeReportSummary'
+ SBOMOverview:
+   type: object
+   description: 'The generate SBOM overview information'
+   properties:
+     start_time:
+       type: string
+       format: date-time
+       description: 'The start time of the generating sbom report task'
+       example: '2006-01-02T14:04:05Z'
+     end_time:
+       type: string
+       format: date-time
+       description: 'The end time of the generating sbom report task'
+       example: '2006-01-02T15:04:05Z'
+     scan_status:
+       type: string
+       description: 'The status of the generating SBOM task'
+     sbom_digest:
+       type: string
+       description: 'The digest of the generated SBOM accessory'
+     report_id:
+       type: string
+       description: 'id of the native scan report'
+       example: '5f62c830-f996-11e9-957f-0242c0a89008'
+     duration:
+       type: integer
+       format: int64
+       description: 'Time in seconds required to create the report'
+       example: 300
+     scanner:
+       $ref: '#/definitions/Scanner'
  NativeReportSummary:
    type: object
    description: 'The summary for the native report'
@@ -7761,7 +7836,7 @@ definitions:
    properties:
      resource:
        type: string
-       description: The resource of the access. Possible resources are *, artifact, artifact-addition, artifact-label, audit-log, catalog, configuration, distribution, garbage-collection, helm-chart, helm-chart-version, helm-chart-version-label, immutable-tag, label, ldap-user, log, member, metadata, notification-policy, preheat-instance, preheat-policy, project, quota, registry, replication, replication-adapter, replication-policy, repository, robot, scan, scan-all, scanner, system-volumes, tag, tag-retention, user, user-group or "" (for self-reference).
+       description: The resource of the access. Possible resources are listed here for system and project level https://github.com/goharbor/harbor/blob/main/src/common/rbac/const.go
      action:
        type: string
        description: The action of the access. Possible actions are *, pull, push, create, read, update, delete, list, operate, scanner-pull and stop.
@@ -8368,6 +8443,11 @@ definitions:
        default: ""
        description: Indicate the healthy of the registration
        example: "healthy"
+     capabilities:
+       type: object
+       description: Indicates the capabilities of the scanner, e.g. support_vulnerability or support_sbom.
+       additionalProperties: True
+       example: {"support_vulnerability": true, "support_sbom": true}

  ScannerRegistrationReq:
    type: object
@@ -8450,6 +8530,12 @@ definitions:
  ScannerCapability:
    type: object
    properties:
+     type:
+       type: string
+       description: |
+         Specify the type of scanner capability, like vulnerability or sbom
+       x-omitempty: false
+       example: "sbom"
      consumes_mime_types:
        type: array
        items:
@@ -9911,3 +9997,10 @@ definitions:
      items:
        type: string
      description: Links of the vulnerability
+ ScanType:
+   type: object
+   properties:
+     scan_type:
+       type: string
+       description: 'The scan type for the scan request. Two options are currently supported, vulnerability and sbom'
+       enum: [ vulnerability, sbom ]
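
The `ScanType` definition above, combined with the new optional `scanType` body parameter on the scan endpoint, lets a client request SBOM generation instead of a vulnerability scan. A hedged sketch (host, project, repository, digest and credentials are placeholders):

# request an SBOM scan for one artifact; omitting the body keeps the
# default vulnerability-scan behaviour
curl -X POST -u "admin:Harbor12345" \
  -H "Content-Type: application/json" \
  -d '{"scan_type": "sbom"}' \
  "https://harbor.example.com/api/v2.0/projects/library/repositories/nginx/artifacts/sha256:<digest>/scan"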

icons/sbom.png (new binary file, 118 KiB) — binary file not shown

@@ -16,6 +16,18 @@ https:
   # The path of cert and key files for nginx
   certificate: /your/certificate/path
   private_key: /your/private/key/path
+  # enable strong ssl ciphers (default: false)
+  # strong_ssl_ciphers: false
+
+# # Harbor will set ipv4 enabled only by default if this block is not configured
+# # Otherwise, please uncomment this block to configure your own ip_family stacks
+# ip_family:
+#   # ipv6Enabled set to true if ipv6 is enabled in docker network, currently it affected the nginx related component
+#   ipv6:
+#     enabled: false
+#   # ipv4Enabled set to true by default, currently it affected the nginx related component
+#   ipv4:
+#     enabled: true

 # # Uncomment following will enable tls communication between all harbor components
 # internal_tls:
@@ -23,8 +35,7 @@ https:
 #   enabled: true
 #   # put your cert and key files on dir
 #   dir: /etc/harbor/tls/internal
-#   # enable strong ssl ciphers (default: false)
-#   strong_ssl_ciphers: false
+

 # Uncomment external_url if you want to enable external proxy
 # And when it enabled the hostname will no longer used
@@ -87,6 +98,10 @@ trivy:
   #   `metadata.json` files and mount them in the `/home/scanner/.cache/trivy/db` path.
   skip_update: false
   #
+  # skipJavaDBUpdate If the flag is enabled you have to manually download the `trivy-java.db` file and mount it in the
+  # `/home/scanner/.cache/trivy/java-db/trivy-java.db` path
+  skip_java_db_update: false
+  #
   # The offline_scan option prevents Trivy from sending API requests to identify dependencies.
   # Scanning JAR files and pom.xml may require Internet access for better detection, but this option tries to avoid it.
   # For example, the offline mode will not try to resolve transitive dependencies in pom.xml when the dependency doesn't
@@ -100,6 +115,11 @@ trivy:
   #
   # insecure The flag to skip verifying registry certificate
   insecure: false
+  #
+  # timeout The duration to wait for scan completion.
+  # There is upper bound of 30 minutes defined in scan job. So if this `timeout` is larger than 30m0s, it will also timeout at 30m0s.
+  timeout: 5m0s
+  #
   # github_token The GitHub access token to download Trivy DB
   #
   # Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
@@ -154,7 +174,7 @@ log:
 #     port: 5140

 #This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
-_version: 2.10.0
+_version: 2.11.0

 # Uncomment external_database if using external database.
 # external_database:
@@ -238,7 +258,7 @@ proxy:
 #   enabled: true
 #   # set sample_rate to 1 if you wanna sampling 100% of trace data; set 0.5 if you wanna sampling 50% of trace data, and so forth
 #   sample_rate: 1
-#   # # namespace used to differenciate different harbor services
+#   # # namespace used to differentiate different harbor services
 #   # namespace:
 #   # # attributes is a key value dict contains user defined attributes used to initialize trace provider
 #   # attributes:
@@ -291,6 +311,6 @@ cache:
 # # can improve the performance of high concurrent pushing to the same project,
 # # and reduce the database connections spike and occupies.
 # # By redis will bring up some delay for quota usage updation for display, so only
-# # suggest switch provider to redis if you were ran into the db connections spike aroud
-# # the scenario of high concurrent pushing to same project, no improvment for other scenes.
+# # suggest switch provider to redis if you were ran into the db connections spike around
+# # the scenario of high concurrent pushing to same project, no improvement for other scenes.
 # quota_update_provider: redis # Or db

make/migrations/postgresql/0140_2.11.0_schema.up.sql (new file) | 43 lines

@@ -0,0 +1,43 @@
/*
table artifact:
 id SERIAL PRIMARY KEY NOT NULL,
 type varchar(255) NOT NULL,
 media_type varchar(255) NOT NULL,
 manifest_media_type varchar(255) NOT NULL,
 artifact_type varchar(255) NOT NULL,
 project_id int NOT NULL,
 repository_id int NOT NULL,
 repository_name varchar(255) NOT NULL,
 digest varchar(255) NOT NULL,
 size bigint,
 push_time timestamp default CURRENT_TIMESTAMP,
 pull_time timestamp,
 extra_attrs text,
 annotations jsonb,
 CONSTRAINT unique_artifact UNIQUE (repository_id, digest)
*/

/*
Add new column artifact_type for artifact table to work with oci-spec v1.1.0 list referrer api
*/
ALTER TABLE artifact ADD COLUMN IF NOT EXISTS artifact_type varchar(255);

/*
set value for artifact_type
then set column artifact_type as not null
*/
UPDATE artifact SET artifact_type = media_type WHERE artifact_type IS NULL;

ALTER TABLE artifact ALTER COLUMN artifact_type SET NOT NULL;

CREATE TABLE IF NOT EXISTS sbom_report
(
    id SERIAL PRIMARY KEY NOT NULL,
    uuid VARCHAR(64) UNIQUE NOT NULL,
    artifact_id INT NOT NULL,
    registration_uuid VARCHAR(64) NOT NULL,
    mime_type VARCHAR(256) NOT NULL,
    media_type VARCHAR(256) NOT NULL,
    report JSON,
    UNIQUE(artifact_id, registration_uuid, mime_type, media_type)
);

@@ -178,7 +178,7 @@ _build_registry:
 		rm -rf $(DOCKERFILEPATH_REG)/binary && mkdir -p $(DOCKERFILEPATH_REG)/binary && \
 		$(call _get_binary, $(REGISTRYURL), $(DOCKERFILEPATH_REG)/binary/registry); \
 	else \
-		cd $(DOCKERFILEPATH_REG) && $(DOCKERFILEPATH_REG)/builder $(REGISTRY_SRC_TAG) && cd - ; \
+		cd $(DOCKERFILEPATH_REG) && $(DOCKERFILEPATH_REG)/builder $(REGISTRY_SRC_TAG) $(DISTRIBUTION_SRC) && cd - ; \
 	fi
 	@echo "building registry container for photon..."
 	@chmod 655 $(DOCKERFILEPATH_REG)/binary/registry && $(DOCKERBUILD_WITH_PULL_PARA) --build-arg harbor_base_image_version=$(BASEIMAGETAG) --build-arg harbor_base_namespace=$(BASEIMAGENAMESPACE) -f $(DOCKERFILEPATH_REG)/$(DOCKERFILENAME_REG) -t $(DOCKERIMAGENAME_REG):$(VERSIONTAG) .

@@ -10,7 +10,7 @@ from migrations import accept_versions
 @click.command()
 @click.option('-i', '--input', 'input_', required=True, help="The path of original config file")
 @click.option('-o', '--output', default='', help="the path of output config file")
-@click.option('-t', '--target', default='2.10.0', help="target version of input path")
+@click.option('-t', '--target', default='2.11.0', help="target version of input path")
 def migrate(input_, output, target):
     """
     migrate command will migrate config file style to specific version
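
With the default bumped above, the prepare migrator now targets 2.11.0 when `-t` is not given. A hedged usage sketch (the script entry point is illustrative; only the `-i`/`-o`/`-t` options come from the code above):

# migrate an existing config file to the 2.11.0 layout
python migrate.py -i /path/to/old/harbor.yml -o /path/to/new/harbor.yml -t 2.11.0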

@@ -2,4 +2,4 @@ import os

 MIGRATION_BASE_DIR = os.path.dirname(__file__)

-accept_versions = {'1.9.0', '1.10.0', '2.0.0', '2.1.0', '2.2.0', '2.3.0', '2.4.0', '2.5.0', '2.6.0', '2.7.0', '2.8.0', '2.9.0','2.10.0'}
+accept_versions = {'1.9.0', '1.10.0', '2.0.0', '2.1.0', '2.2.0', '2.3.0', '2.4.0', '2.5.0', '2.6.0', '2.7.0', '2.8.0', '2.9.0','2.10.0', '2.11.0'}

@@ -23,6 +23,12 @@ https:
   # The path of cert and key files for nginx
   certificate: {{ https.certificate }}
   private_key: {{ https.private_key }}
+  # enable strong ssl ciphers (default: false)
+  {% if strong_ssl_ciphers is defined %}
+  strong_ssl_ciphers: {{ strong_ssl_ciphers | lower }}
+  {% else %}
+  strong_ssl_ciphers: false
+  {% endif %}
 {% else %}
 # https related config
 # https:
@@ -31,6 +37,8 @@ https:
 # # The path of cert and key files for nginx
 # certificate: /your/certificate/path
 # private_key: /your/private/key/path
+# enable strong ssl ciphers (default: false)
+# strong_ssl_ciphers: false
 {% endif %}

 {% if internal_tls is defined %}
@@ -38,13 +46,9 @@ https:
 internal_tls:
   # set enabled to true means internal tls is enabled
   enabled: {{ internal_tls.enabled | lower }}
 {% if internal_tls.dir is defined %}
   # put your cert and key files on dir
   dir: {{ internal_tls.dir }}
-  # enable strong ssl ciphers (default: false)
-{% if internal_tls.strong_ssl_ciphers is defined %}
-  strong_ssl_ciphers: {{ internal_tls.strong_ssl_ciphers | lower }}
-{% else %}
-  strong_ssl_ciphers: false
 {% endif %}
 {% else %}
 # internal_tls:
@@ -52,8 +56,6 @@ internal_tls:
 #   enabled: true
 #   # put your cert and key files on dir
 #   dir: /etc/harbor/tls/internal
-#   # enable strong ssl ciphers (default: false)
-#   strong_ssl_ciphers: false
 {% endif %}

 # Uncomment external_url if you want to enable external proxy

make/photon/prepare/migrations/version_2_11_0/__init__.py (new file) | 21 lines

@@ -0,0 +1,21 @@
import os
from jinja2 import Environment, FileSystemLoader, StrictUndefined, select_autoescape
from utils.migration import read_conf

revision = '2.11.0'
down_revisions = ['2.10.0']

def migrate(input_cfg, output_cfg):
    current_dir = os.path.dirname(__file__)
    tpl = Environment(
        loader=FileSystemLoader(current_dir),
        undefined=StrictUndefined,
        trim_blocks=True,
        lstrip_blocks=True,
        autoescape = select_autoescape()
        ).get_template('harbor.yml.jinja')

    config_dict = read_conf(input_cfg)

    with open(output_cfg, 'w') as f:
        f.write(tpl.render(**config_dict))
737
make/photon/prepare/migrations/version_2_11_0/harbor.yml.jinja
Normal file
737
make/photon/prepare/migrations/version_2_11_0/harbor.yml.jinja
Normal file
@ -0,0 +1,737 @@
|
||||
# Configuration file of Harbor
|
||||
|
||||
# The IP address or hostname to access admin UI and registry service.
|
||||
# DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
|
||||
hostname: {{ hostname }}
|
||||
|
||||
# http related config
|
||||
{% if http is defined %}
|
||||
http:
|
||||
# port for http, default is 80. If https enabled, this port will redirect to https port
|
||||
port: {{ http.port }}
|
||||
{% else %}
|
||||
# http:
|
||||
# # port for http, default is 80. If https enabled, this port will redirect to https port
|
||||
# port: 80
|
||||
{% endif %}
|
||||
|
||||
{% if https is defined %}
|
||||
# https related config
|
||||
https:
|
||||
# https port for harbor, default is 443
|
||||
port: {{ https.port }}
|
||||
# The path of cert and key files for nginx
|
||||
certificate: {{ https.certificate }}
|
||||
private_key: {{ https.private_key }}
|
||||
# enable strong ssl ciphers (default: false)
|
||||
{% if strong_ssl_ciphers is defined %}
|
||||
strong_ssl_ciphers: {{ strong_ssl_ciphers | lower }}
|
||||
{% else %}
|
||||
strong_ssl_ciphers: false
|
||||
{% endif %}
|
||||
{% else %}
|
||||
# https related config
|
||||
# https:
|
||||
# # https port for harbor, default is 443
|
||||
# port: 443
|
||||
# # The path of cert and key files for nginx
|
||||
# certificate: /your/certificate/path
|
||||
# private_key: /your/private/key/path
|
||||
# enable strong ssl ciphers (default: false)
|
||||
# strong_ssl_ciphers: false
|
||||
{% endif %}
|
||||
|
||||
# # Harbor will set ipv4 enabled only by default if this block is not configured
|
||||
# # Otherwise, please uncomment this block to configure your own ip_family stacks
|
||||
{% if ip_family is defined %}
|
||||
ip_family:
|
||||
# ipv6Enabled set to true if ipv6 is enabled in docker network, currently it affected the nginx related component
|
||||
{% if ip_family.ipv6 is defined %}
|
||||
ipv6:
|
||||
enabled: {{ ip_family.ipv6.enabled | lower }}
|
||||
{% else %}
|
||||
ipv6:
|
||||
enabled: false
|
||||
{% endif %}
|
||||
# ipv4Enabled set to true by default, currently it affected the nginx related component
|
||||
{% if ip_family.ipv4 is defined %}
|
||||
ipv4:
|
||||
enabled: {{ ip_family.ipv4.enabled | lower }}
|
||||
{% else %}
|
||||
ipv4:
|
||||
enabled: true
|
||||
{% endif %}
|
||||
{% else %}
|
||||
# ip_family:
|
||||
# # ipv6Enabled set to true if ipv6 is enabled in docker network, currently it affected the nginx related component
|
||||
# ipv6:
|
||||
# enabled: false
|
||||
# # ipv4Enabled set to true by default, currently it affected the nginx related component
|
||||
# ipv4:
|
||||
# enabled: true
|
||||
{% endif %}
|
||||
|
||||
{% if internal_tls is defined %}
|
||||
# Uncomment following will enable tls communication between all harbor components
|
||||
internal_tls:
|
||||
# set enabled to true means internal tls is enabled
|
||||
enabled: {{ internal_tls.enabled | lower }}
|
||||
{% if internal_tls.dir is defined %}
|
||||
# put your cert and key files on dir
|
||||
dir: {{ internal_tls.dir }}
|
||||
{% endif %}
|
||||
{% else %}
|
||||
# internal_tls:
|
||||
# # set enabled to true means internal tls is enabled
|
||||
# enabled: true
|
||||
# # put your cert and key files on dir
|
||||
# dir: /etc/harbor/tls/internal
|
||||
{% endif %}
|
||||
|
||||
# Uncomment external_url if you want to enable external proxy
|
||||
# And when it enabled the hostname will no longer used
|
||||
{% if external_url is defined %}
|
||||
external_url: {{ external_url }}
|
||||
{% else %}
|
||||
# external_url: https://reg.mydomain.com:8433
|
||||
{% endif %}
|
||||
|
||||
# The initial password of Harbor admin
|
||||
# It only works in first time to install harbor
|
||||
# Remember Change the admin password from UI after launching Harbor.
|
||||
{% if harbor_admin_password is defined %}
|
||||
harbor_admin_password: {{ harbor_admin_password }}
|
||||
{% else %}
|
||||
harbor_admin_password: Harbor12345
|
||||
{% endif %}
|
||||
|
||||
# Harbor DB configuration
|
||||
database:
|
||||
{% if database is defined %}
|
||||
# The password for the root user of Harbor DB. Change this before any production use.
|
||||
password: {{ database.password}}
|
||||
# The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
|
||||
max_idle_conns: {{ database.max_idle_conns }}
|
||||
# The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
|
||||
# Note: the default number of connections is 1024 for postgres of harbor.
|
||||
max_open_conns: {{ database.max_open_conns }}
|
||||
# The maximum amount of time a connection may be reused. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's age.
|
||||
# The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
||||
{% if database.conn_max_lifetime is defined %}
|
||||
conn_max_lifetime: {{ database.conn_max_lifetime }}
|
||||
{% else %}
|
||||
conn_max_lifetime: 5m
|
||||
{% endif %}
|
||||
# The maximum amount of time a connection may be idle. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's idle time.
|
||||
# The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
||||
{% if database.conn_max_idle_time is defined %}
|
||||
conn_max_idle_time: {{ database.conn_max_idle_time }}
|
||||
{% else %}
|
||||
conn_max_idle_time: 0
|
||||
{% endif %}
|
||||
{% else %}
|
||||
# The password for the root user of Harbor DB. Change this before any production use.
|
||||
password: root123
|
||||
# The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
|
||||
max_idle_conns: 100
|
||||
# The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
|
||||
# Note: the default number of connections is 1024 for postgres of harbor.
|
||||
max_open_conns: 900
|
||||
# The maximum amount of time a connection may be reused. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's age.
|
||||
# The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
||||
conn_max_lifetime: 5m
|
||||
# The maximum amount of time a connection may be idle. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's idle time.
|
||||
# The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
||||
conn_max_idle_time: 0
|
||||
{% endif %}
|
||||
|
||||
{% if data_volume is defined %}
|
||||
# The default data volume
|
||||
data_volume: {{ data_volume }}
|
||||
{% else %}
|
||||
# The default data volume
|
||||
data_volume: /data
|
||||
{% endif %}
|
||||
|
||||
# Harbor Storage settings by default is using /data dir on local filesystem
|
||||
# Uncomment storage_service setting If you want to using external storage
|
||||
{% if storage_service is defined %}
|
||||
storage_service:
|
||||
{% for key, value in storage_service.items() %}
|
||||
{% if key == 'ca_bundle' %}
|
||||
# # ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
|
||||
# # of registry's and chart repository's containers. This is usually needed when the user hosts a internal storage with self signed certificate.
|
||||
ca_bundle: {{ value if value is not none else '' }}
|
||||
{% elif key == 'redirect' %}
|
||||
# # set disable to true when you want to disable registry redirect
|
||||
redirect:
|
||||
{% if storage_service.redirect.disabled is defined %}
|
||||
disable: {{ storage_service.redirect.disabled | lower}}
|
||||
{% else %}
|
||||
disable: {{ storage_service.redirect.disable | lower}}
|
||||
{% endif %}
|
||||
{% else %}
|
||||
# # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss
|
||||
# # for more info about this configuration please refer https://distribution.github.io/distribution/about/configuration/
|
||||
# # and https://distribution.github.io/distribution/storage-drivers/
|
||||
{{ key }}:
|
||||
{% for k, v in value.items() %}
|
||||
{{ k }}: {{ v if v is not none else '' }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{% else %}
|
||||
# storage_service:
|
||||
# # ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
|
||||
# # of registry's and chart repository's containers. This is usually needed when the user hosts a internal storage with self signed certificate.
|
||||
# ca_bundle:
|
||||
|
||||
# # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss
|
||||
# # for more info about this configuration please refer https://distribution.github.io/distribution/about/configuration/
|
||||
# # and https://distribution.github.io/distribution/storage-drivers/
|
||||
# filesystem:
|
||||
# maxthreads: 100
|
||||
# # set disable to true when you want to disable registry redirect
|
||||
# redirect:
|
||||
# disable: false
|
||||
{% endif %}
|
||||
|
||||
# Trivy configuration
|
||||
#
|
||||
# Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases.
|
||||
# It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached
|
||||
# in the local file system. In addition, the database contains the update timestamp so Trivy can detect whether it
|
||||
# should download a newer version from the Internet or use the cached one. Currently, the database is updated every
|
||||
# 12 hours and published as a new release to GitHub.
|
||||
{% if trivy is defined %}
|
||||
trivy:
|
||||
# ignoreUnfixed The flag to display only fixed vulnerabilities
|
||||
{% if trivy.ignore_unfixed is defined %}
|
||||
ignore_unfixed: {{ trivy.ignore_unfixed | lower }}
|
||||
{% else %}
|
||||
ignore_unfixed: false
|
||||
{% endif %}
|
||||
# skipUpdate The flag to enable or disable Trivy DB downloads from GitHub
|
||||
#
|
||||
# You might want to enable this flag in test or CI/CD environments to avoid GitHub rate limiting issues.
|
||||
# If the flag is enabled you have to download the `trivy-offline.tar.gz` archive manually, extract `trivy.db` and
|
||||
# `metadata.json` files and mount them in the `/home/scanner/.cache/trivy/db` path.
|
||||
{% if trivy.skip_update is defined %}
|
||||
skip_update: {{ trivy.skip_update | lower }}
|
||||
{% else %}
|
||||
skip_update: false
|
||||
{% endif %}
|
||||
{% if trivy.skip_java_db_update is defined %}
|
||||
# skipJavaDBUpdate If the flag is enabled you have to manually download the `trivy-java.db` file and mount it in the
|
||||
# `/home/scanner/.cache/trivy/java-db/trivy-java.db` path
|
||||
skip_java_db_update: {{ trivy.skip_java_db_update | lower }}
|
||||
{% else %}
|
||||
skip_java_db_update: false
|
||||
{% endif %}
|
||||
#
|
||||
{% if trivy.offline_scan is defined %}
|
||||
offline_scan: {{ trivy.offline_scan | lower }}
|
||||
{% else %}
|
||||
offline_scan: false
|
||||
{% endif %}
|
||||
#
|
||||
# Comma-separated list of what security issues to detect. Possible values are `vuln`, `config` and `secret`. Defaults to `vuln`.
|
||||
{% if trivy.security_check is defined %}
|
||||
security_check: {{ trivy.security_check }}
|
||||
{% else %}
|
||||
security_check: vuln
|
||||
{% endif %}
|
||||
#
|
||||
# insecure The flag to skip verifying registry certificate
|
||||
{% if trivy.insecure is defined %}
|
||||
insecure: {{ trivy.insecure | lower }}
|
||||
{% else %}
|
||||
insecure: false
|
||||
{% endif %}
|
||||
#
|
||||
{% if trivy.timeout is defined %}
|
||||
# timeout The duration to wait for scan completion.
|
||||
# There is upper bound of 30 minutes defined in scan job. So if this `timeout` is larger than 30m0s, it will also timeout at 30m0s.
|
||||
timeout: {{ trivy.timeout}}
|
||||
{% else %}
|
||||
timeout: 5m0s
|
||||
{% endif %}
|
||||
#
|
||||
# github_token The GitHub access token to download Trivy DB
|
||||
#
|
||||
# Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
|
||||
# for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
|
||||
# requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
|
||||
# https://developer.github.com/v3/#rate-limiting
|
||||
#
|
||||
# You can create a GitHub token by following the instructions in
|
||||
# https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
|
||||
#
|
||||
{% if trivy.github_token is defined %}
|
||||
github_token: {{ trivy.github_token }}
|
||||
{% else %}
|
||||
# github_token: xxx
|
||||
{% endif %}
|
||||
{% else %}
|
||||
# trivy:
|
||||
# # ignoreUnfixed The flag to display only fixed vulnerabilities
|
||||
# ignore_unfixed: false
|
||||
# # skipUpdate The flag to enable or disable Trivy DB downloads from GitHub
|
||||
# #
|
||||
# # You might want to enable this flag in test or CI/CD environments to avoid GitHub rate limiting issues.
|
||||
# # If the flag is enabled you have to download the `trivy-offline.tar.gz` archive manually, extract `trivy.db` and
|
||||
# # `metadata.json` files and mount them in the `/home/scanner/.cache/trivy/db` path.
|
||||
# skip_update: false
|
||||
# #
|
||||
# # skipJavaDBUpdate If the flag is enabled you have to manually download the `trivy-java.db` file and mount it in the
|
||||
# # `/home/scanner/.cache/trivy/java-db/trivy-java.db` path
|
||||
# skip_java_db_update: false
|
||||
# #
|
||||
# #The offline_scan option prevents Trivy from sending API requests to identify dependencies.
|
||||
# # Scanning JAR files and pom.xml may require Internet access for better detection, but this option tries to avoid it.
|
||||
# # For example, the offline mode will not try to resolve transitive dependencies in pom.xml when the dependency doesn't
|
||||
# # exist in the local repositories. It means a number of detected vulnerabilities might be fewer in offline mode.
|
||||
# # It would work if all the dependencies are in local.
|
||||
# # This option doesn’t affect DB download. You need to specify "skip-update" as well as "offline-scan" in an air-gapped environment.
|
||||
# offline_scan: false
|
||||
# #
|
||||
# # insecure The flag to skip verifying registry certificate
|
||||
# insecure: false
|
||||
# # github_token The GitHub access token to download Trivy DB
|
||||
# #
|
||||
# # Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
|
||||
# # for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
|
||||
# # requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
|
||||
# # https://developer.github.com/v3/#rate-limiting
|
||||
# #
|
||||
# # timeout The duration to wait for scan completion.
|
||||
# # There is upper bound of 30 minutes defined in scan job. So if this `timeout` is larger than 30m0s, it will also timeout at 30m0s.
|
||||
# timeout: 5m0s
|
||||
# #
|
||||
# # You can create a GitHub token by following the instructions in
|
||||
# # https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
|
||||
# #
|
||||
# # github_token: xxx
|
||||
{% endif %}
|
||||
|
||||
jobservice:
|
||||
# Maximum number of job workers in job service
|
||||
{% if jobservice is defined %}
|
||||
max_job_workers: {{ jobservice.max_job_workers }}
|
||||
# The jobLoggers backend name, only support "STD_OUTPUT", "FILE" and/or "DB"
|
||||
{% if jobservice.job_loggers is defined %}
|
||||
job_loggers:
|
||||
{% for job_logger in jobservice.job_loggers %}
|
||||
- {{job_logger}}
|
||||
{% endfor %}
|
||||
{% else %}
|
||||
job_loggers:
|
||||
- STD_OUTPUT
|
||||
- FILE
|
||||
# - DB
|
||||
{% endif %}
|
||||
# The jobLogger sweeper duration (ignored if `jobLogger` is `stdout`)
|
||||
{% if jobservice.logger_sweeper_duration is defined %}
|
||||
logger_sweeper_duration: {{ jobservice.logger_sweeper_duration }}
|
||||
{% else %}
|
||||
logger_sweeper_duration: 1
|
||||
{% endif %}
|
||||
{% else %}
|
||||
max_job_workers: 10
|
||||
# The jobLoggers backend name, only support "STD_OUTPUT", "FILE" and/or "DB"
|
||||
job_loggers:
|
||||
- STD_OUTPUT
|
||||
- FILE
|
||||
# - DB
|
||||
# The jobLogger sweeper duration (ignored if `jobLogger` is `stdout`)
|
||||
logger_sweeper_duration: 1
|
||||
{% endif %}
|
||||
|
||||
notification:
|
||||
# Maximum retry count for webhook job
|
||||
{% if notification is defined %}
|
||||
webhook_job_max_retry: {{ notification.webhook_job_max_retry}}
|
||||
# HTTP client timeout for webhook job
|
||||
{% if notification.webhook_job_http_client_timeout is defined %}
|
||||
webhook_job_http_client_timeout: {{ notification.webhook_job_http_client_timeout }}
|
||||
{% else %}
|
||||
webhook_job_http_client_timeout: 3 #seconds
|
||||
{% endif %}
|
||||
{% else %}
|
||||
webhook_job_max_retry: 3
|
||||
# HTTP client timeout for webhook job
|
||||
webhook_job_http_client_timeout: 3 #seconds
|
||||
{% endif %}
|
||||
|
||||
# Log configurations
|
||||
log:
|
||||
# options are debug, info, warning, error, fatal
|
||||
{% if log is defined %}
|
||||
level: {{ log.level }}
|
||||
# configs for logs in local storage
|
||||
local:
|
||||
# Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
|
||||
rotate_count: {{ log.local.rotate_count }}
|
||||
# Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
|
||||
# If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
|
||||
# are all valid.
|
||||
rotate_size: {{ log.local.rotate_size }}
|
||||
# The directory on your host that store log
|
||||
location: {{ log.local.location }}
|
||||
{% if log.external_endpoint is defined %}
|
||||
external_endpoint:
|
||||
# protocol used to transmit log to external endpoint, options is tcp or udp
|
||||
protocol: {{ log.external_endpoint.protocol }}
|
||||
# The host of external endpoint
|
||||
host: {{ log.external_endpoint.host }}
|
||||
# Port of external endpoint
|
||||
port: {{ log.external_endpoint.port }}
|
||||
{% else %}
|
||||
# Uncomment following lines to enable external syslog endpoint.
|
||||
# external_endpoint:
|
||||
# # protocol used to transmit log to external endpoint, options is tcp or udp
|
||||
# protocol: tcp
|
||||
# # The host of external endpoint
|
||||
# host: localhost
|
||||
# # Port of external endpoint
|
||||
# port: 5140
|
||||
{% endif %}
|
||||
{% else %}
|
||||
level: info
|
||||
# configs for logs in local storage
|
||||
local:
|
||||
# Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
|
||||
rotate_count: 50
|
||||
# Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
|
||||
# If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
|
||||
# are all valid.
|
||||
rotate_size: 200M
|
||||
# The directory on your host that store log
|
||||
location: /var/log/harbor
|
||||
|
||||
# Uncomment following lines to enable external syslog endpoint.
|
||||
# external_endpoint:
|
||||
# # protocol used to transmit log to external endpoint, options is tcp or udp
|
||||
# protocol: tcp
|
||||
# # The host of external endpoint
|
||||
# host: localhost
|
||||
# # Port of external endpoint
|
||||
# port: 5140
|
||||
{% endif %}
|
||||
|
||||
|
||||
#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
|
||||
_version: 2.11.0
|
||||
{% if external_database is defined %}
|
||||
# Uncomment external_database if using external database.
|
||||
external_database:
|
||||
harbor:
|
||||
host: {{ external_database.harbor.host }}
|
||||
port: {{ external_database.harbor.port }}
|
||||
db_name: {{ external_database.harbor.db_name }}
|
||||
username: {{ external_database.harbor.username }}
|
||||
password: {{ external_database.harbor.password }}
|
||||
ssl_mode: {{ external_database.harbor.ssl_mode }}
|
||||
max_idle_conns: {{ external_database.harbor.max_idle_conns}}
|
||||
max_open_conns: {{ external_database.harbor.max_open_conns}}
|
||||
{% else %}
|
||||
# Uncomment external_database if using external database.
|
||||
# external_database:
|
||||
# harbor:
|
||||
# host: harbor_db_host
|
||||
# port: harbor_db_port
|
||||
# db_name: harbor_db_name
|
||||
# username: harbor_db_username
|
||||
# password: harbor_db_password
|
||||
# ssl_mode: disable
|
||||
# max_idle_conns: 2
|
||||
# max_open_conns: 0
|
||||
{% endif %}
|
||||
|
||||
{% if redis is defined %}
|
||||
redis:
|
||||
# # db_index 0 is for core, it's unchangeable
|
||||
{% if redis.registry_db_index is defined %}
|
||||
registry_db_index: {{ redis.registry_db_index }}
|
||||
{% else %}
|
||||
# # registry_db_index: 1
|
||||
{% endif %}
|
||||
{% if redis.jobservice_db_index is defined %}
|
||||
jobservice_db_index: {{ redis.jobservice_db_index }}
|
||||
{% else %}
|
||||
# # jobservice_db_index: 2
|
||||
{% endif %}
|
||||
{% if redis.trivy_db_index is defined %}
|
||||
trivy_db_index: {{ redis.trivy_db_index }}
|
||||
{% else %}
|
||||
# # trivy_db_index: 5
|
||||
{% endif %}
|
||||
{% if redis.harbor_db_index is defined %}
|
||||
harbor_db_index: {{ redis.harbor_db_index }}
|
||||
{% else %}
|
||||
# # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
|
||||
# # harbor_db_index: 6
|
||||
{% endif %}
|
||||
{% if redis.cache_layer_db_index is defined %}
|
||||
cache_layer_db_index: {{ redis.cache_layer_db_index }}
|
||||
{% else %}
|
||||
# # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
|
||||
# # cache_layer_db_index: 7
|
||||
{% endif %}
|
||||
{% else %}
|
||||
# Uncomment redis if need to customize redis db
|
||||
# redis:
|
||||
# # db_index 0 is for core, it's unchangeable
|
||||
# # registry_db_index: 1
|
||||
# # jobservice_db_index: 2
|
||||
# # trivy_db_index: 5
|
||||
# # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
|
||||
# # harbor_db_index: 6
|
||||
# # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
|
||||
# # cache_layer_db_index: 7
|
||||
{% endif %}
|
||||
|
||||
{% if external_redis is defined %}
|
||||
external_redis:
|
||||
# support redis, redis+sentinel
|
||||
# host for redis: <host_redis>:<port_redis>
|
||||
# host for redis+sentinel:
|
||||
# <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
|
||||
host: {{ external_redis.host }}
|
||||
password: {{ external_redis.password }}
|
||||
# Redis AUTH command was extended in Redis 6, it is possible to use it in the two-arguments AUTH <username> <password> form.
|
||||
{% if external_redis.username is defined %}
|
||||
username: {{ external_redis.username }}
|
||||
{% else %}
|
||||
# username:
|
||||
{% endif %}
|
||||
# sentinel_master_set must be set to support redis+sentinel
|
||||
#sentinel_master_set:
|
||||
# db_index 0 is for core, it's unchangeable
|
||||
registry_db_index: {{ external_redis.registry_db_index }}
|
||||
jobservice_db_index: {{ external_redis.jobservice_db_index }}
|
||||
trivy_db_index: 5
|
||||
idle_timeout_seconds: 30
|
||||
{% if external_redis.harbor_db_index is defined %}
|
||||
harbor_db_index: {{ external_redis.harbor_db_index }}
|
||||
{% else %}
|
||||
# # it's optional, the db for harbor business misc; the default is 0, uncomment it if you want to change it.
|
||||
# # harbor_db_index: 6
|
||||
{% endif %}
|
||||
{% if external_redis.cache_layer_db_index is defined %}
|
||||
cache_layer_db_index: {{ external_redis.cache_layer_db_index }}
|
||||
{% else %}
|
||||
# # it's optional, the db for the harbor cache layer; the default is 0, uncomment it if you want to change it.
|
||||
# # cache_layer_db_index: 7
|
||||
{% endif %}
|
||||
{% else %}
|
||||
# Uncomment external_redis if using an external Redis server
|
||||
# external_redis:
|
||||
# # support redis, redis+sentinel
|
||||
# # host for redis: <host_redis>:<port_redis>
|
||||
# # host for redis+sentinel:
|
||||
# # <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
|
||||
# host: redis:6379
|
||||
# password:
|
||||
# # Redis AUTH command was extended in Redis 6; it is possible to use it in the two-argument AUTH <username> <password> form.
|
||||
# # username:
|
||||
# # sentinel_master_set must be set to support redis+sentinel
|
||||
# #sentinel_master_set:
|
||||
# # db_index 0 is for core, it's unchangeable
|
||||
# registry_db_index: 1
|
||||
# jobservice_db_index: 2
|
||||
# trivy_db_index: 5
|
||||
# idle_timeout_seconds: 30
|
||||
# # it's optional, the db for harbor business misc; the default is 0, uncomment it if you want to change it.
|
||||
# # harbor_db_index: 6
|
||||
# # it's optional, the db for the harbor cache layer; the default is 0, uncomment it if you want to change it.
|
||||
# # cache_layer_db_index: 7
|
||||
{% endif %}
|
||||
|
||||
{% if uaa is defined %}
|
||||
# Uncomment uaa to trust the certificate of a uaa instance that is hosted via self-signed cert.
|
||||
uaa:
|
||||
ca_file: {{ uaa.ca_file }}
|
||||
{% else %}
|
||||
# Uncomment uaa to trust the certificate of a uaa instance that is hosted via self-signed cert.
|
||||
# uaa:
|
||||
# ca_file: /path/to/ca
|
||||
{% endif %}
|
||||
|
||||
|
||||
# Global proxy
|
||||
# Configure the http proxy for components, e.g. http://my.proxy.com:3128
|
||||
# Components do not need to connect to each other via the http proxy.
|
||||
# Remove a component from the `components` array to disable the proxy
|
||||
# for it. To use the proxy for replication, you MUST enable the proxy
|
||||
# for core and jobservice, and set `http_proxy` and `https_proxy`.
|
||||
# Add a domain to the `no_proxy` field when you want to disable the proxy
|
||||
# for a specific registry.
|
||||
{% if proxy is defined %}
|
||||
proxy:
|
||||
http_proxy: {{ proxy.http_proxy or ''}}
|
||||
https_proxy: {{ proxy.https_proxy or ''}}
|
||||
no_proxy: {{ proxy.no_proxy or ''}}
|
||||
{% if proxy.components is defined %}
|
||||
components:
|
||||
{% for component in proxy.components %}
|
||||
{% if component != 'clair' %}
|
||||
- {{component}}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% else %}
|
||||
proxy:
|
||||
http_proxy:
|
||||
https_proxy:
|
||||
no_proxy:
|
||||
components:
|
||||
- core
|
||||
- jobservice
|
||||
- trivy
|
||||
{% endif %}
|
||||
|
||||
{% if metric is defined %}
|
||||
metric:
|
||||
enabled: {{ metric.enabled }}
|
||||
port: {{ metric.port }}
|
||||
path: {{ metric.path }}
|
||||
{% else %}
|
||||
# metric:
|
||||
# enabled: false
|
||||
# port: 9090
|
||||
# path: /metrics
|
||||
{% endif %}
|
||||
|
||||
# Trace related config
|
||||
# only one trace provider (jaeger or otel) can be enabled at a time,
|
||||
# and when using jaeger as the provider, it can only be enabled in agent mode or collector mode.
|
||||
# if using jaeger collector mode, uncomment endpoint, and uncomment username and password if needed
|
||||
# if using jaeger agent mode, uncomment agent_host and agent_port
|
||||
{% if trace is defined %}
|
||||
trace:
|
||||
enabled: {{ trace.enabled | lower}}
|
||||
sample_rate: {{ trace.sample_rate }}
|
||||
# # namespace used to differentiate different harbor services
|
||||
{% if trace.namespace is defined %}
|
||||
namespace: {{ trace.namespace }}
|
||||
{% else %}
|
||||
# namespace:
|
||||
{% endif %}
|
||||
# # attributes is a key value dict contains user defined attributes used to initialize trace provider
|
||||
{% if trace.attributes is defined %}
|
||||
attributes:
|
||||
{% for name, value in trace.attributes.items() %}
|
||||
{{name}}: {{value}}
|
||||
{% endfor %}
|
||||
{% else %}
|
||||
# attributes:
|
||||
# application: harbor
|
||||
{% endif %}
|
||||
{% if trace.jaeger is defined %}
|
||||
jaeger:
|
||||
endpoint: {{trace.jaeger.endpoint or '' }}
|
||||
username: {{trace.jaeger.username or ''}}
|
||||
password: {{trace.jaeger.password or ''}}
|
||||
agent_host: {{trace.jaeger.agent_host or ''}}
|
||||
agent_port: {{trace.jaeger.agent_port or ''}}
|
||||
{% else %}
|
||||
# jaeger:
|
||||
# endpoint:
|
||||
# username:
|
||||
# password:
|
||||
# agent_host:
|
||||
# agent_port:
|
||||
{% endif %}
|
||||
{% if trace.otel is defined %}
|
||||
otel:
|
||||
endpoint: {{trace.otel.endpoint or '' }}
|
||||
url_path: {{trace.otel.url_path or '' }}
|
||||
compression: {{trace.otel.compression | lower }}
|
||||
insecure: {{trace.otel.insecure | lower }}
|
||||
timeout: {{trace.otel.timeout or '' }}
|
||||
{% else %}
|
||||
# otel:
|
||||
# endpoint: hostname:4318
|
||||
# url_path: /v1/traces
|
||||
# compression: false
|
||||
# insecure: true
|
||||
# # timeout is in seconds
|
||||
# timeout: 10
|
||||
{% endif %}
|
||||
{% else %}
|
||||
# trace:
|
||||
# enabled: true
|
||||
# # set sample_rate to 1 to sample 100% of trace data; set it to 0.5 to sample 50%, and so forth
|
||||
# sample_rate: 1
|
||||
# # # namespace used to differentiate different harbor services
|
||||
# # namespace:
|
||||
# # # attributes is a key value dict contains user defined attributes used to initialize trace provider
|
||||
# # attributes:
|
||||
# # application: harbor
|
||||
# # jaeger:
|
||||
# # endpoint: http://hostname:14268/api/traces
|
||||
# # username:
|
||||
# # password:
|
||||
# # agent_host: hostname
|
||||
# # agent_port: 6831
|
||||
# # otel:
|
||||
# # endpoint: hostname:4318
|
||||
# # url_path: /v1/traces
|
||||
# # compression: false
|
||||
# # insecure: true
|
||||
# # # timeout is in seconds
|
||||
# # timeout: 10
|
||||
{% endif %}
|
||||
|
||||
# enable purging of _upload directories
|
||||
{% if upload_purging is defined %}
|
||||
upload_purging:
|
||||
enabled: {{ upload_purging.enabled | lower}}
|
||||
age: {{ upload_purging.age }}
|
||||
interval: {{ upload_purging.interval }}
|
||||
dryrun: {{ upload_purging.dryrun | lower}}
|
||||
{% else %}
|
||||
upload_purging:
|
||||
enabled: true
|
||||
# remove files in _upload directories that have existed for this period of time; the default is one week.
|
||||
age: 168h
|
||||
# the interval of the purge operations
|
||||
interval: 24h
|
||||
dryrun: false
|
||||
{% endif %}
|
||||
|
||||
# Cache layer related config
|
||||
{% if cache is defined %}
|
||||
cache:
|
||||
enabled: {{ cache.enabled | lower}}
|
||||
expire_hours: {{ cache.expire_hours }}
|
||||
{% else %}
|
||||
cache:
|
||||
enabled: false
|
||||
expire_hours: 24
|
||||
{% endif %}
|
||||
|
||||
# Harbor core configurations
|
||||
# Uncomment to enable the following harbor core related configuration items.
|
||||
{% if core is defined %}
|
||||
core:
|
||||
# The provider for updating project quota (usage). There are 2 options: redis or db.
|
||||
# By default the update is implemented by db, but you can switch the provider to redis,
|
||||
# which can improve the performance of highly concurrent pushes to the same project
|
||||
# and reduce database connection spikes and occupancy.
|
||||
# Using redis adds some delay before quota usage updates are displayed, so only
|
||||
# switch the provider to redis if you run into db connection spikes in
|
||||
# the scenario of highly concurrent pushes to the same project; there is no improvement for other scenarios.
|
||||
quota_update_provider: {{ core.quota_update_provider }}
|
||||
{% else %}
|
||||
# core:
|
||||
# # The provider for updating project quota (usage). There are 2 options: redis or db.
|
||||
# # By default the update is implemented by db, but you can switch the provider to redis,
|
||||
# # which can improve the performance of highly concurrent pushes to the same project
|
||||
# # and reduce database connection spikes and occupancy.
|
||||
# # Using redis adds some delay before quota usage updates are displayed, so only
|
||||
# # switch the provider to redis if you run into db connection spikes in
|
||||
# # the scenario of highly concurrent pushes to the same project; there is no improvement for other scenarios.
|
||||
# quota_update_provider: redis # Or db
|
||||
{% endif %}
|
@ -50,8 +50,12 @@ http {
|
||||
include /etc/nginx/conf.d/*.server.conf;
|
||||
|
||||
server {
|
||||
{% if ip_family.ipv4.enabled %}
|
||||
listen 8443 ssl;
|
||||
{% endif %}
|
||||
{% if ip_family.ipv6.enabled %}
|
||||
listen [::]:8443 ssl;
|
||||
{% endif %}
|
||||
# server_name harbordomain.com;
|
||||
server_tokens off;
|
||||
# SSL
|
||||
@ -60,7 +64,7 @@ http {
|
||||
|
||||
# Recommendations from https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html
|
||||
ssl_protocols TLSv1.2 TLSv1.3;
|
||||
{% if internal_tls.strong_ssl_ciphers %}
|
||||
{% if strong_ssl_ciphers %}
|
||||
ssl_ciphers ECDHE+AESGCM:DHE+AESGCM:ECDHE+RSA+SHA256:DHE+RSA+SHA256:!AES128;
|
||||
{% else %}
|
||||
ssl_ciphers '!aNULL:kECDH+AESGCM:ECDH+AESGCM:RSA+AESGCM:kECDH+AES:ECDH+AES:RSA+AES:';
|
||||
|
@ -16,14 +16,19 @@ http {
|
||||
|
||||
server {
|
||||
{% if internal_tls.enabled %}
|
||||
#ip_family
|
||||
{% if ip_family.ipv4.enabled %}
|
||||
listen 8443 ssl;
|
||||
{% endif %}
|
||||
{% if ip_family.ipv6.enabled %}
|
||||
listen [::]:8443 ssl;
|
||||
{% endif %}
|
||||
# SSL
|
||||
ssl_certificate /etc/harbor/tls/portal.crt;
|
||||
ssl_certificate_key /etc/harbor/tls/portal.key;
|
||||
|
||||
ssl_protocols TLSv1.2 TLSv1.3;
|
||||
{% if internal_tls.strong_ssl_ciphers %}
|
||||
{% if strong_ssl_ciphers %}
|
||||
ssl_ciphers ECDHE+AESGCM:DHE+AESGCM:ECDHE+RSA+SHA256:DHE+RSA+SHA256:!AES128;
|
||||
{% else %}
|
||||
ssl_ciphers '!aNULL:kECDH+AESGCM:ECDH+AESGCM:RSA+AESGCM:kECDH+AES:ECDH+AES:RSA+AES:';
|
||||
|
@ -10,6 +10,7 @@ SCANNER_TRIVY_VULN_TYPE=os,library
|
||||
SCANNER_TRIVY_SEVERITY=UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL
|
||||
SCANNER_TRIVY_IGNORE_UNFIXED={{trivy_ignore_unfixed}}
|
||||
SCANNER_TRIVY_SKIP_UPDATE={{trivy_skip_update}}
|
||||
SCANNER_TRIVY_SKIP_JAVA_DB_UPDATE={{trivy_skip_java_db_update}}
|
||||
SCANNER_TRIVY_OFFLINE_SCAN={{trivy_offline_scan}}
|
||||
SCANNER_TRIVY_SECURITY_CHECKS={{trivy_security_check}}
|
||||
SCANNER_TRIVY_GITHUB_TOKEN={{trivy_github_token}}
|
||||
|
@ -212,6 +212,7 @@ def parse_yaml_config(config_file_path, with_trivy):
|
||||
trivy_configs = configs.get("trivy") or {}
|
||||
config_dict['trivy_github_token'] = trivy_configs.get("github_token") or ''
|
||||
config_dict['trivy_skip_update'] = trivy_configs.get("skip_update") or False
|
||||
config_dict['trivy_skip_java_db_update'] = trivy_configs.get("skip_java_db_update") or False
|
||||
config_dict['trivy_offline_scan'] = trivy_configs.get("offline_scan") or False
|
||||
config_dict['trivy_security_check'] = trivy_configs.get("security_check") or 'vuln'
|
||||
config_dict['trivy_ignore_unfixed'] = trivy_configs.get("ignore_unfixed") or False
|
||||
@ -298,6 +299,20 @@ def parse_yaml_config(config_file_path, with_trivy):
|
||||
external_database=config_dict['external_database'])
|
||||
else:
|
||||
config_dict['internal_tls'] = InternalTLS()
|
||||
# this configuration item applies to both internal and external tls communication;
|
||||
# for compatibility, the user could configure strong_ssl_ciphers either in the https section or under the internal_tls section,
|
||||
# but it is more reasonable to configure it in https_config
|
||||
if https_config:
|
||||
config_dict['strong_ssl_ciphers'] = https_config.get('strong_ssl_ciphers')
|
||||
else:
|
||||
config_dict['strong_ssl_ciphers'] = False
|
||||
|
||||
if internal_tls_config:
|
||||
config_dict['strong_ssl_ciphers'] = config_dict['strong_ssl_ciphers'] or internal_tls_config.get('strong_ssl_ciphers')
|
||||
|
||||
|
||||
# ip_family config
|
||||
config_dict['ip_family'] = configs.get('ip_family') or {'ipv4': {'enabled': True}, 'ipv6': {'enabled': False}}
|
||||
|
||||
# metric configs
|
||||
metric_config = configs.get('metric')
|
||||
|
@ -27,6 +27,12 @@ def read_conf(path):
|
||||
with open(path) as f:
|
||||
try:
|
||||
d = yaml.safe_load(f)
|
||||
# the strong_ssl_ciphers configuration item applies to both internal and external tls communication;
|
||||
# for compatibility, the user could configure strong_ssl_ciphers either in the https section or under the internal_tls section,
|
||||
# but it will move to the https section after migration
|
||||
https_config = d.get("https") or {}
|
||||
internal_tls = d.get('internal_tls') or {}
|
||||
d['strong_ssl_ciphers'] = https_config.get('strong_ssl_ciphers') or internal_tls.get('strong_ssl_ciphers')
|
||||
except Exception as e:
|
||||
click.echo(f"parse config file err: {e}, make sure your harbor config version is above 1.8.0")
|
||||
exit(-1)
|
||||
|
@ -63,7 +63,9 @@ def render_nginx_template(config_dict):
|
||||
ssl_cert=SSL_CERT_PATH,
|
||||
ssl_cert_key=SSL_CERT_KEY_PATH,
|
||||
internal_tls=config_dict['internal_tls'],
|
||||
metric=config_dict['metric'])
|
||||
metric=config_dict['metric'],
|
||||
strong_ssl_ciphers=config_dict['strong_ssl_ciphers'],
|
||||
ip_family=config_dict['ip_family'])
|
||||
location_file_pattern = CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTPS
|
||||
|
||||
else:
|
||||
|
@ -14,5 +14,8 @@ def prepare_portal(config_dict):
|
||||
str(portal_conf_template_path),
|
||||
portal_conf,
|
||||
internal_tls=config_dict['internal_tls'],
|
||||
ip_family=config_dict['ip_family'],
|
||||
uid=DEFAULT_UID,
|
||||
gid=DEFAULT_GID)
|
||||
gid=DEFAULT_GID,
|
||||
strong_ssl_ciphers=config_dict['strong_ssl_ciphers']
|
||||
)
|
||||
|
@ -1,4 +1,4 @@
|
||||
FROM golang:1.21.5
|
||||
FROM golang:1.22.3
|
||||
|
||||
ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution
|
||||
ENV BUILDTAGS include_oss include_gcs
|
||||
|
@ -7,7 +7,13 @@ if [ -z $1 ]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -z $2 ]; then
|
||||
error "Please set the 'distribution_src' variable"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
VERSION="$1"
|
||||
DISTRIBUTION_SRC="$2"
|
||||
|
||||
set -e
|
||||
|
||||
@ -20,7 +26,7 @@ cur=$PWD
|
||||
|
||||
# the temp folder to store distribution source code...
|
||||
TEMP=`mktemp -d ${TMPDIR-/tmp}/distribution.XXXXXX`
|
||||
git clone -b $VERSION https://github.com/distribution/distribution.git $TEMP
|
||||
git clone -b $VERSION $DISTRIBUTION_SRC $TEMP
|
||||
|
||||
# add patch redis
|
||||
cd $TEMP
|
||||
|
@ -1,4 +1,4 @@
|
||||
FROM golang:1.21.5
|
||||
FROM golang:1.22.3
|
||||
|
||||
ADD . /go/src/github.com/aquasecurity/harbor-scanner-trivy/
|
||||
WORKDIR /go/src/github.com/aquasecurity/harbor-scanner-trivy/
|
||||
|
@ -19,7 +19,7 @@ TEMP=$(mktemp -d ${TMPDIR-/tmp}/trivy-adapter.XXXXXX)
|
||||
git clone https://github.com/aquasecurity/harbor-scanner-trivy.git $TEMP
|
||||
cd $TEMP; git checkout $VERSION; cd -
|
||||
|
||||
echo "Building Trivy adapter binary based on golang:1.21.5..."
|
||||
echo "Building Trivy adapter binary based on golang:1.22.3..."
|
||||
cp Dockerfile.binary $TEMP
|
||||
docker build -f $TEMP/Dockerfile.binary -t trivy-adapter-golang $TEMP
|
||||
|
||||
|
6
package-lock.json
generated
@ -1,6 +0,0 @@
|
||||
{
|
||||
"name": "harbor",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {}
|
||||
}
|
524
src/.mockery.yaml
Normal file
@ -0,0 +1,524 @@
|
||||
with-expecter: false
|
||||
outpkg: "{{.PackageName}}"
|
||||
mockname: "{{.InterfaceName}}"
|
||||
filename: "{{.InterfaceName | snakecase}}.go"
|
||||
packages:
|
||||
# controller related mocks
|
||||
github.com/goharbor/harbor/src/controller/artifact:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/artifact
|
||||
github.com/goharbor/harbor/src/controller/blob:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/blob
|
||||
github.com/goharbor/harbor/src/controller/project:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/project
|
||||
github.com/goharbor/harbor/src/controller/quota:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/quota
|
||||
github.com/goharbor/harbor/src/controller/scan:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/scan
|
||||
Checker:
|
||||
config:
|
||||
dir: testing/controller/scan
|
||||
github.com/goharbor/harbor/src/controller/scanner:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/scanner
|
||||
github.com/goharbor/harbor/src/controller/replication:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/replication
|
||||
github.com/goharbor/harbor/src/controller/replication/flow:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: controller/replication
|
||||
outpkg: replication
|
||||
mockname: flowController
|
||||
filename: mock_flow_controller_test.go
|
||||
registryAdapter:
|
||||
config:
|
||||
dir: controller/replication/flow
|
||||
outpkg: flow
|
||||
mockname: mockAdapter
|
||||
filename: mock_adapter_test.go
|
||||
github.com/goharbor/harbor/src/controller/robot:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/robot
|
||||
github.com/goharbor/harbor/src/controller/proxy:
|
||||
interfaces:
|
||||
RemoteInterface:
|
||||
config:
|
||||
dir: testing/controller/proxy
|
||||
github.com/goharbor/harbor/src/controller/retention:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/retention
|
||||
github.com/goharbor/harbor/src/controller/config:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/config
|
||||
github.com/goharbor/harbor/src/controller/user:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/user
|
||||
github.com/goharbor/harbor/src/controller/repository:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/repository
|
||||
github.com/goharbor/harbor/src/controller/purge:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/purge
|
||||
github.com/goharbor/harbor/src/controller/jobservice:
|
||||
interfaces:
|
||||
SchedulerController:
|
||||
config:
|
||||
dir: testing/controller/jobservice
|
||||
github.com/goharbor/harbor/src/controller/systemartifact:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/systemartifact
|
||||
github.com/goharbor/harbor/src/controller/scandataexport:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/scandataexport
|
||||
github.com/goharbor/harbor/src/controller/task:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/task
|
||||
ExecutionController:
|
||||
config:
|
||||
dir: testing/controller/task
|
||||
github.com/goharbor/harbor/src/controller/webhook:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/webhook
|
||||
github.com/goharbor/harbor/src/controller/securityhub:
|
||||
interfaces:
|
||||
Controller:
|
||||
config:
|
||||
dir: testing/controller/securityhub
|
||||
|
||||
# jobservice related mocks
|
||||
github.com/goharbor/harbor/src/jobservice/mgt:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: jobservice/mgt
|
||||
outpkg: mgt
|
||||
mockname: MockManager
|
||||
filename: mock_manager.go
|
||||
github.com/goharbor/harbor/src/jobservice/period:
|
||||
interfaces:
|
||||
Scheduler:
|
||||
config:
|
||||
dir: jobservice/period
|
||||
outpkg: period
|
||||
mockname: MockScheduler
|
||||
filename: mock_scheduler.go
|
||||
inpackage: True
|
||||
|
||||
# common and lib related mocks
|
||||
github.com/goharbor/harbor/src/lib/cache:
|
||||
interfaces:
|
||||
Cache:
|
||||
configs:
|
||||
- dir: lib/cache
|
||||
outpkg: cache
|
||||
mockname: mockCache
|
||||
filename: mock_cache_test.go
|
||||
inpackage: True
|
||||
- dir: testing/lib/cache
|
||||
Iterator:
|
||||
config:
|
||||
dir: testing/lib/cache
|
||||
github.com/goharbor/harbor/src/lib/orm:
|
||||
interfaces:
|
||||
Creator:
|
||||
config:
|
||||
dir: testing/lib/orm
|
||||
github.com/goharbor/harbor/src/lib/config:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/lib/config
|
||||
github.com/goharbor/harbor/src/common/job:
|
||||
interfaces:
|
||||
Client:
|
||||
config:
|
||||
dir: pkg/task
|
||||
outpkg: task
|
||||
mockname: mockJobserviceClient
|
||||
filename: mock_jobservice_client_test.go
|
||||
github.com/goharbor/harbor/src/common/security:
|
||||
interfaces:
|
||||
Context:
|
||||
config:
|
||||
dir: testing/common/security
|
||||
|
||||
# pkg related mocks
|
||||
github.com/goharbor/harbor/src/pkg/artifact:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/artifact
|
||||
github.com/goharbor/harbor/src/pkg/blob:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/blob
|
||||
github.com/goharbor/harbor/src/pkg/project:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/project
|
||||
github.com/goharbor/harbor/src/pkg/project/metadata:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/project/metadata
|
||||
github.com/goharbor/harbor/src/pkg/quota:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/quota
|
||||
github.com/goharbor/harbor/src/pkg/quota/driver:
|
||||
interfaces:
|
||||
Driver:
|
||||
config:
|
||||
dir: testing/pkg/quota/driver
|
||||
github.com/goharbor/harbor/src/pkg/scan:
|
||||
interfaces:
|
||||
Handler:
|
||||
config:
|
||||
dir: testing/pkg/scan
|
||||
github.com/goharbor/harbor/src/pkg/scan/report:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/scan/report
|
||||
github.com/goharbor/harbor/src/pkg/scan/rest/v1:
|
||||
config:
|
||||
dir: testing/pkg/scan/rest/v1
|
||||
all: True
|
||||
github.com/goharbor/harbor/src/pkg/scan/scanner:
|
||||
config:
|
||||
dir: testing/pkg/scan/scanner
|
||||
all: True
|
||||
github.com/goharbor/harbor/src/pkg/scheduler:
|
||||
interfaces:
|
||||
DAO:
|
||||
config:
|
||||
dir: pkg/scheduler
|
||||
outpkg: scheduler
|
||||
mockname: mockDAO
|
||||
filename: mock_dao_test.go
|
||||
inpackage: True
|
||||
Scheduler:
|
||||
config:
|
||||
dir: testing/pkg/scheduler
|
||||
github.com/goharbor/harbor/src/pkg/task:
|
||||
interfaces:
|
||||
Manager:
|
||||
configs:
|
||||
- dir: pkg/task
|
||||
outpkg: task
|
||||
mockname: mockTaskManager
|
||||
filename: mock_task_manager_test.go
|
||||
inpackage: True
|
||||
- dir: testing/pkg/task
|
||||
SweepManager:
|
||||
config:
|
||||
dir: pkg/task
|
||||
outpkg: task
|
||||
mockname: mockSweepManager
|
||||
filename: mock_sweep_manager_test.go
|
||||
inpackage: True
|
||||
ExecutionManager:
|
||||
config:
|
||||
dir: testing/pkg/task
|
||||
github.com/goharbor/harbor/src/pkg/task/dao:
|
||||
interfaces:
|
||||
TaskDAO:
|
||||
config:
|
||||
dir: pkg/task
|
||||
outpkg: task
|
||||
mockname: mockTaskDAO
|
||||
filename: mock_task_dao_test.go
|
||||
ExecutionDAO:
|
||||
config:
|
||||
dir: pkg/task
|
||||
outpkg: task
|
||||
mockname: mockExecutionDAO
|
||||
filename: mock_execution_dao_test.go
|
||||
github.com/goharbor/harbor/src/pkg/user:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/user
|
||||
github.com/goharbor/harbor/src/pkg/user/dao:
|
||||
interfaces:
|
||||
DAO:
|
||||
config:
|
||||
dir: testing/pkg/user/dao
|
||||
github.com/goharbor/harbor/src/pkg/oidc:
|
||||
interfaces:
|
||||
MetaManager:
|
||||
config:
|
||||
dir: testing/pkg/oidc
|
||||
github.com/goharbor/harbor/src/pkg/oidc/dao:
|
||||
interfaces:
|
||||
MetaDAO:
|
||||
config:
|
||||
dir: testing/pkg/oidc/dao
|
||||
github.com/goharbor/harbor/src/pkg/rbac:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/rbac
|
||||
github.com/goharbor/harbor/src/pkg/rbac/dao:
|
||||
interfaces:
|
||||
DAO:
|
||||
config:
|
||||
dir: testing/pkg/rbac/dao
|
||||
github.com/goharbor/harbor/src/pkg/robot:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/robot
|
||||
github.com/goharbor/harbor/src/pkg/robot/dao:
|
||||
interfaces:
|
||||
DAO:
|
||||
config:
|
||||
dir: testing/pkg/robot/dao
|
||||
github.com/goharbor/harbor/src/pkg/repository:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/repository
|
||||
github.com/goharbor/harbor/src/pkg/repository/dao:
|
||||
interfaces:
|
||||
DAO:
|
||||
config:
|
||||
dir: testing/pkg/repository/dao
|
||||
github.com/goharbor/harbor/src/pkg/notification/policy:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/notification/policy
|
||||
github.com/goharbor/harbor/src/pkg/notification/policy/dao:
|
||||
interfaces:
|
||||
DAO:
|
||||
config:
|
||||
dir: testing/pkg/notification/policy/dao
|
||||
github.com/goharbor/harbor/src/pkg/immutable/dao:
|
||||
interfaces:
|
||||
DAO:
|
||||
config:
|
||||
dir: testing/pkg/immutable/dao
|
||||
github.com/goharbor/harbor/src/pkg/ldap:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/ldap
|
||||
github.com/goharbor/harbor/src/pkg/allowlist:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/allowlist
|
||||
github.com/goharbor/harbor/src/pkg/allowlist/dao:
|
||||
interfaces:
|
||||
DAO:
|
||||
config:
|
||||
dir: testing/pkg/allowlist/dao
|
||||
github.com/goharbor/harbor/src/pkg/reg:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/reg
|
||||
github.com/goharbor/harbor/src/pkg/reg/dao:
|
||||
interfaces:
|
||||
DAO:
|
||||
config:
|
||||
dir: testing/pkg/reg/dao
|
||||
github.com/goharbor/harbor/src/pkg/reg/adapter:
|
||||
interfaces:
|
||||
Factory:
|
||||
config:
|
||||
dir: controller/replication/flow
|
||||
outpkg: flow
|
||||
mockname: mockFactory
|
||||
filename: mock_adapter_factory_test.go
|
||||
Adapter:
|
||||
config:
|
||||
dir: testing/pkg/reg/adapter
|
||||
github.com/goharbor/harbor/src/pkg/replication:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/replication
|
||||
github.com/goharbor/harbor/src/pkg/replication/dao:
|
||||
interfaces:
|
||||
DAO:
|
||||
config:
|
||||
dir: testing/pkg/replication/dao
|
||||
github.com/goharbor/harbor/src/pkg/label:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/label
|
||||
github.com/goharbor/harbor/src/pkg/label/dao:
|
||||
interfaces:
|
||||
DAO:
|
||||
config:
|
||||
dir: testing/pkg/label/dao
|
||||
github.com/goharbor/harbor/src/pkg/joblog:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/joblog
|
||||
github.com/goharbor/harbor/src/pkg/joblog/dao:
|
||||
interfaces:
|
||||
DAO:
|
||||
config:
|
||||
dir: testing/pkg/joblog/dao
|
||||
github.com/goharbor/harbor/src/pkg/accessory:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/accessory
|
||||
github.com/goharbor/harbor/src/pkg/accessory/dao:
|
||||
interfaces:
|
||||
DAO:
|
||||
config:
|
||||
dir: testing/pkg/accessory/dao
|
||||
github.com/goharbor/harbor/src/pkg/accessory/model:
|
||||
interfaces:
|
||||
Accessory:
|
||||
config:
|
||||
dir: testing/pkg/accessory/model
|
||||
github.com/goharbor/harbor/src/pkg/audit:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/audit
|
||||
github.com/goharbor/harbor/src/pkg/audit/dao:
|
||||
interfaces:
|
||||
DAO:
|
||||
config:
|
||||
dir: testing/pkg/audit/dao
|
||||
github.com/goharbor/harbor/src/pkg/systemartifact:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/systemartifact
|
||||
Selector:
|
||||
config:
|
||||
dir: testing/pkg/systemartifact/cleanup
|
||||
outpkg: cleanup
|
||||
github.com/goharbor/harbor/src/pkg/systemartifact/dao:
|
||||
interfaces:
|
||||
DAO:
|
||||
config:
|
||||
dir: testing/pkg/systemartifact/dao
|
||||
github.com/goharbor/harbor/src/pkg/cached/manifest/redis:
|
||||
interfaces:
|
||||
CachedManager:
|
||||
config:
|
||||
dir: testing/pkg/cached/manifest/redis
|
||||
github.com/goharbor/harbor/src/pkg/scan/export:
|
||||
interfaces:
|
||||
FilterProcessor:
|
||||
config:
|
||||
dir: testing/pkg/scan/export
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/scan/export
|
||||
ArtifactDigestCalculator:
|
||||
config:
|
||||
dir: testing/pkg/scan/export
|
||||
github.com/goharbor/harbor/src/pkg/scan/sbom:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/scan/sbom
|
||||
github.com/goharbor/harbor/src/pkg/registry:
|
||||
interfaces:
|
||||
Client:
|
||||
config:
|
||||
dir: testing/pkg/registry
|
||||
filename: fake_registry_client.go
|
||||
github.com/goharbor/harbor/src/pkg/member:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/member
|
||||
filename: fake_member_manager.go
|
||||
github.com/goharbor/harbor/src/pkg/usergroup:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/usergroup
|
||||
filename: fake_usergroup_manager.go
|
||||
github.com/goharbor/harbor/src/pkg/jobmonitor:
|
||||
config:
|
||||
dir: testing/pkg/jobmonitor
|
||||
interfaces:
|
||||
PoolManager:
|
||||
JobServiceMonitorClient:
|
||||
WorkerManager:
|
||||
QueueManager:
|
||||
RedisClient:
|
||||
github.com/goharbor/harbor/src/pkg/queuestatus:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/queuestatus
|
||||
github.com/goharbor/harbor/src/pkg/securityhub:
|
||||
interfaces:
|
||||
Manager:
|
||||
config:
|
||||
dir: testing/pkg/securityhub
|
||||
|
@ -96,7 +96,7 @@ func main() {
|
||||
)
|
||||
prometheus.MustRegister(harborExporter)
|
||||
if err := harborExporter.ListenAndServe(); err != nil {
|
||||
log.Errorf("Error starting Harbor expoter %s", err)
|
||||
log.Errorf("Error starting Harbor exporter %s", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
@ -48,20 +48,23 @@ func main() {
|
||||
log.Fatalf("Failed to connect to Database, error: %v\n", err)
|
||||
}
|
||||
defer db.Close()
|
||||
c := make(chan struct{}, 1)
|
||||
|
||||
c := make(chan struct{})
|
||||
go func() {
|
||||
defer close(c)
|
||||
|
||||
err := db.Ping()
|
||||
for ; err != nil; err = db.Ping() {
|
||||
log.Println("Failed to Ping DB, sleep for 1 second.")
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
c <- struct{}{}
|
||||
}()
|
||||
select {
|
||||
case <-c:
|
||||
case <-time.After(30 * time.Second):
|
||||
log.Fatal("Failed to connect DB after 30 seconds, time out. \n")
|
||||
}
|
||||
|
||||
row := db.QueryRow(pgSQLCheckColStmt)
|
||||
var tblCount, colCount int
|
||||
if err := row.Scan(&tblCount, &colCount); err != nil {
|
||||
|
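The hunk above switches the signal channel from buffered to unbuffered and closes it with defer, so the select is unblocked by the close itself rather than by a sent value, and the waiting goroutine cannot leak a value into a channel nobody reads. A minimal self-contained sketch of the same wait-with-timeout pattern (the waitForDB name and the Postgres driver import are illustrative assumptions, not code from this commit):

package dbwait

import (
	"database/sql"
	"log"
	"time"

	_ "github.com/lib/pq" // assumed driver; any database/sql driver works
)

// waitForDB pings the database until it answers or the deadline expires.
func waitForDB(db *sql.DB, timeout time.Duration) {
	done := make(chan struct{})
	go func() {
		defer close(done) // a close unblocks the receiver even without a send
		for db.Ping() != nil {
			log.Println("Failed to ping DB, sleeping for 1 second.")
			time.Sleep(1 * time.Second)
		}
	}()
	select {
	case <-done:
	case <-time.After(timeout):
		log.Fatalf("Failed to connect to DB after %v, timing out.", timeout)
	}
}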
@ -14,6 +14,8 @@
|
||||
|
||||
package common
|
||||
|
||||
import "time"
|
||||
|
||||
type contextKey string
|
||||
|
||||
// const variables
|
||||
@ -241,4 +243,7 @@ const (
|
||||
BeegoMaxUploadSizeBytes = "beego_max_upload_size_bytes"
|
||||
// DefaultBeegoMaxUploadSizeBytes sets default max upload size to 128GB
|
||||
DefaultBeegoMaxUploadSizeBytes = 1 << 37
|
||||
|
||||
// Global Leeway used for token validation
|
||||
JwtLeeway = 60 * time.Second
|
||||
)
|
||||
|
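The JwtLeeway constant added above is a clock-skew allowance for token validation. One plausible way such a constant is consumed, assuming the golang-jwt/jwt/v5 parser (this usage is a sketch, not code from this commit):

package token

import (
	"time"

	"github.com/golang-jwt/jwt/v5"
)

// newLeewayParser returns a parser that tolerates the given clock skew when
// validating time-based claims such as exp, nbf and iat.
func newLeewayParser(leeway time.Duration) *jwt.Parser {
	return jwt.NewParser(jwt.WithLeeway(leeway))
}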
@ -51,6 +51,7 @@ const (
|
||||
ResourceRobot = Resource("robot")
|
||||
ResourceNotificationPolicy = Resource("notification-policy")
|
||||
ResourceScan = Resource("scan")
|
||||
ResourceSBOM = Resource("sbom")
|
||||
ResourceScanner = Resource("scanner")
|
||||
ResourceArtifact = Resource("artifact")
|
||||
ResourceTag = Resource("tag")
|
||||
@ -182,6 +183,10 @@ var (
|
||||
{Resource: ResourceScan, Action: ActionRead},
|
||||
{Resource: ResourceScan, Action: ActionStop},
|
||||
|
||||
{Resource: ResourceSBOM, Action: ActionCreate},
|
||||
{Resource: ResourceSBOM, Action: ActionStop},
|
||||
{Resource: ResourceSBOM, Action: ActionRead},
|
||||
|
||||
{Resource: ResourceTag, Action: ActionCreate},
|
||||
{Resource: ResourceTag, Action: ActionList},
|
||||
{Resource: ResourceTag, Action: ActionDelete},
|
||||
|
@ -86,6 +86,9 @@ var (
|
||||
{Resource: rbac.ResourceScan, Action: rbac.ActionCreate},
|
||||
{Resource: rbac.ResourceScan, Action: rbac.ActionRead},
|
||||
{Resource: rbac.ResourceScan, Action: rbac.ActionStop},
|
||||
{Resource: rbac.ResourceSBOM, Action: rbac.ActionCreate},
|
||||
{Resource: rbac.ResourceSBOM, Action: rbac.ActionStop},
|
||||
{Resource: rbac.ResourceSBOM, Action: rbac.ActionRead},
|
||||
|
||||
{Resource: rbac.ResourceScanner, Action: rbac.ActionRead},
|
||||
{Resource: rbac.ResourceScanner, Action: rbac.ActionCreate},
|
||||
@ -122,10 +125,7 @@ var (
|
||||
{Resource: rbac.ResourceMember, Action: rbac.ActionRead},
|
||||
{Resource: rbac.ResourceMember, Action: rbac.ActionList},
|
||||
|
||||
{Resource: rbac.ResourceMetadata, Action: rbac.ActionCreate},
|
||||
{Resource: rbac.ResourceMetadata, Action: rbac.ActionRead},
|
||||
{Resource: rbac.ResourceMetadata, Action: rbac.ActionUpdate},
|
||||
{Resource: rbac.ResourceMetadata, Action: rbac.ActionDelete},
|
||||
|
||||
{Resource: rbac.ResourceLog, Action: rbac.ActionList},
|
||||
|
||||
@ -169,6 +169,9 @@ var (
|
||||
{Resource: rbac.ResourceScan, Action: rbac.ActionCreate},
|
||||
{Resource: rbac.ResourceScan, Action: rbac.ActionRead},
|
||||
{Resource: rbac.ResourceScan, Action: rbac.ActionStop},
|
||||
{Resource: rbac.ResourceSBOM, Action: rbac.ActionCreate},
|
||||
{Resource: rbac.ResourceSBOM, Action: rbac.ActionStop},
|
||||
{Resource: rbac.ResourceSBOM, Action: rbac.ActionRead},
|
||||
|
||||
{Resource: rbac.ResourceScanner, Action: rbac.ActionRead},
|
||||
|
||||
@ -223,6 +226,7 @@ var (
|
||||
{Resource: rbac.ResourceRobot, Action: rbac.ActionList},
|
||||
|
||||
{Resource: rbac.ResourceScan, Action: rbac.ActionRead},
|
||||
{Resource: rbac.ResourceSBOM, Action: rbac.ActionRead},
|
||||
|
||||
{Resource: rbac.ResourceScanner, Action: rbac.ActionRead},
|
||||
|
||||
@ -267,6 +271,7 @@ var (
|
||||
{Resource: rbac.ResourceRobot, Action: rbac.ActionList},
|
||||
|
||||
{Resource: rbac.ResourceScan, Action: rbac.ActionRead},
|
||||
{Resource: rbac.ResourceSBOM, Action: rbac.ActionRead},
|
||||
|
||||
{Resource: rbac.ResourceScanner, Action: rbac.ActionRead},
|
||||
|
||||
@ -290,6 +295,7 @@ var (
|
||||
{Resource: rbac.ResourceConfiguration, Action: rbac.ActionRead},
|
||||
|
||||
{Resource: rbac.ResourceScan, Action: rbac.ActionRead},
|
||||
{Resource: rbac.ResourceSBOM, Action: rbac.ActionRead},
|
||||
|
||||
{Resource: rbac.ResourceScanner, Action: rbac.ActionRead},
|
||||
|
||||
|
@ -313,11 +313,11 @@ func ValidateCronString(cron string) error {
|
||||
// sort.Slice(input, func(i, j int) bool {
|
||||
// return MostMatchSorter(input[i].GroupName, input[j].GroupName, matchWord)
|
||||
// })
|
||||
//
|
||||
// a is the field to be used for sorting, b is the other field, matchWord is the word to be matched
|
||||
// the return value is true if a is less than b
|
||||
// for example, search with "user", input is {"harbor_user", "user", "users, "admin_user"}
|
||||
// it returns with this order {"user", "users", "admin_user", "harbor_user"}
|
||||
|
||||
func MostMatchSorter(a, b string, matchWord string) bool {
|
||||
// exact match always first
|
||||
if a == matchWord {
|
||||
@ -332,3 +332,8 @@ func MostMatchSorter(a, b string, matchWord string) bool {
|
||||
}
|
||||
return len(a) < len(b)
|
||||
}
|
||||
|
||||
// IsLocalPath checks if the path is local; the empty path counts as local
|
||||
func IsLocalPath(path string) bool {
|
||||
return len(path) == 0 || (strings.HasPrefix(path, "/") && !strings.HasPrefix(path, "//"))
|
||||
}
|
||||
|
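Putting MostMatchSorter and sort.Slice together, a runnable sketch of the ordering described in the comment above (assuming MostMatchSorter is in scope; the input values mirror the comment's example):

package utils

import (
	"fmt"
	"sort"
)

func ExampleMostMatchSorter() {
	input := []string{"harbor_user", "user", "users", "admin_user"}
	matchWord := "user"
	// Exact match first, then prefix matches, then shorter strings.
	sort.Slice(input, func(i, j int) bool {
		return MostMatchSorter(input[i], input[j], matchWord)
	})
	fmt.Println(input)
	// Output: [user users admin_user harbor_user]
}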
@ -486,3 +486,26 @@ func TestValidateCronString(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsLocalPath(t *testing.T) {
|
||||
type args struct {
|
||||
path string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want bool
|
||||
}{
|
||||
{"normal test", args{"/harbor/project"}, true},
|
||||
{"failed", args{"www.myexample.com"}, false},
|
||||
{"other_site1", args{"//www.myexample.com"}, false},
|
||||
{"other_site2", args{"https://www.myexample.com"}, false},
|
||||
{"other_site", args{"http://www.myexample.com"}, false},
|
||||
{"empty_path", args{""}, true},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equalf(t, tt.want, IsLocalPath(tt.args.path), "IsLocalPath(%v)", tt.args.path)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@ -127,10 +127,18 @@ func (a *abstractor) abstractManifestV2Metadata(artifact *artifact.Artifact, con
|
||||
}
|
||||
// use the "manifest.config.mediatype" as the media type of the artifact
|
||||
artifact.MediaType = manifest.Config.MediaType
|
||||
|
||||
if manifest.Annotations[wasm.AnnotationVariantKey] == wasm.AnnotationVariantValue || manifest.Annotations[wasm.AnnotationHandlerKey] == wasm.AnnotationHandlerValue {
|
||||
artifact.MediaType = wasm.MediaType
|
||||
}
|
||||
/*
|
||||
https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#listing-referrers
|
||||
For referrers list, if the artifactType is empty or missing in the image manifest, the value of artifactType MUST be set to the config descriptor mediaType value
|
||||
*/
|
||||
if manifest.ArtifactType != "" {
|
||||
artifact.ArtifactType = manifest.ArtifactType
|
||||
} else {
|
||||
artifact.ArtifactType = manifest.Config.MediaType
|
||||
}
|
||||
|
||||
// set size
|
||||
artifact.Size = int64(len(content)) + manifest.Config.Size
|
||||
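The two referrers rules implemented in these hunks can be stated compactly: for an image manifest an empty artifactType falls back to the config media type, while for an index an empty artifactType is simply omitted. A sketch of the manifest-side rule, assuming the standard ocispec v1 types (this helper does not exist in the commit):

package abstractor

import v1 "github.com/opencontainers/image-spec/specs-go/v1"

// artifactTypeOf applies the OCI distribution-spec v1.1 referrers rule for
// image manifests: an empty artifactType falls back to config.mediaType.
func artifactTypeOf(m v1.Manifest) string {
	if m.ArtifactType != "" {
		return m.ArtifactType
	}
	return m.Config.MediaType
}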
@ -153,6 +161,16 @@ func (a *abstractor) abstractIndexMetadata(ctx context.Context, art *artifact.Ar
|
||||
return err
|
||||
}
|
||||
|
||||
/*
|
||||
https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#listing-referrers
|
||||
For referrers list, If the artifactType is empty or missing in an index, the artifactType MUST be omitted.
|
||||
*/
|
||||
if index.ArtifactType != "" {
|
||||
art.ArtifactType = index.ArtifactType
|
||||
} else {
|
||||
art.ArtifactType = ""
|
||||
}
|
||||
|
||||
// set annotations
|
||||
art.Annotations = index.Annotations
|
||||
|
||||
|
@ -15,6 +15,7 @@
|
||||
package artifact
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/distribution"
|
||||
@ -175,7 +176,66 @@ var (
|
||||
"com.example.key1": "value1"
|
||||
}
|
||||
}`
|
||||
|
||||
OCIManifest = `{
|
||||
"schemaVersion": 2,
|
||||
"mediaType": "application/vnd.oci.image.manifest.v1+json",
|
||||
"config": {
|
||||
"mediaType": "application/vnd.example.config.v1+json",
|
||||
"digest": "sha256:5891b5b522d5df086d0ff0b110fbd9d21bb4fc7163af34d08286a2e846f6be03",
|
||||
"size": 123
|
||||
},
|
||||
"layers": [
|
||||
{
|
||||
"mediaType": "application/vnd.example.data.v1.tar+gzip",
|
||||
"digest": "sha256:e258d248fda94c63753607f7c4494ee0fcbe92f1a76bfdac795c9d84101eb317",
|
||||
"size": 1234
|
||||
}
|
||||
],
|
||||
"annotations": {
|
||||
"com.example.key1": "value1"
|
||||
}
|
||||
}`
|
||||
OCIManifestWithArtifactType = `{
|
||||
"schemaVersion": 2,
|
||||
"mediaType": "application/vnd.oci.image.manifest.v1+json",
|
||||
"artifactType": "application/vnd.example+type",
|
||||
"config": {
|
||||
"mediaType": "application/vnd.example.config.v1+json",
|
||||
"digest": "sha256:5891b5b522d5df086d0ff0b110fbd9d21bb4fc7163af34d08286a2e846f6be03",
|
||||
"size": 123
|
||||
},
|
||||
"layers": [
|
||||
{
|
||||
"mediaType": "application/vnd.example.data.v1.tar+gzip",
|
||||
"digest": "sha256:e258d248fda94c63753607f7c4494ee0fcbe92f1a76bfdac795c9d84101eb317",
|
||||
"size": 1234
|
||||
}
|
||||
],
|
||||
"annotations": {
|
||||
"com.example.key1": "value1"
|
||||
}
|
||||
}`
|
||||
OCIManifestWithEmptyConfig = `{
|
||||
"schemaVersion": 2,
|
||||
"mediaType": "application/vnd.oci.image.manifest.v1+json",
|
||||
"artifactType": "application/vnd.example+type",
|
||||
"config": {
|
||||
"mediaType": "application/vnd.oci.empty.v1+json",
|
||||
"digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
|
||||
"size": 2
|
||||
},
|
||||
"layers": [
|
||||
{
|
||||
"mediaType": "application/vnd.example+type",
|
||||
"digest": "sha256:e258d248fda94c63753607f7c4494ee0fcbe92f1a76bfdac795c9d84101eb317",
|
||||
"size": 1234
|
||||
}
|
||||
],
|
||||
"annotations": {
|
||||
"oci.opencontainers.image.created": "2023-01-02T03:04:05Z",
|
||||
"com.example.data": "payload"
|
||||
}
|
||||
}`
|
||||
index = `{
|
||||
"schemaVersion": 2,
|
||||
"manifests": [
|
||||
@ -202,6 +262,34 @@ var (
|
||||
"com.example.key1": "value1"
|
||||
}
|
||||
}`
|
||||
indexWithArtifactType = `{
|
||||
"schemaVersion": 2,
|
||||
"mediaType": "application/vnd.oci.image.index.v1+json",
|
||||
"artifactType": "application/vnd.food.stand",
|
||||
"manifests": [
|
||||
{
|
||||
"mediaType": "application/vnd.oci.image.manifest.v1+json",
|
||||
"size": 7143,
|
||||
"digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
|
||||
"platform": {
|
||||
"architecture": "ppc64le",
|
||||
"os": "linux"
|
||||
}
|
||||
},
|
||||
{
|
||||
"mediaType": "application/vnd.oci.image.manifest.v1+json",
|
||||
"size": 7682,
|
||||
"digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270",
|
||||
"platform": {
|
||||
"architecture": "amd64",
|
||||
"os": "linux"
|
||||
}
|
||||
}
|
||||
],
|
||||
"annotations": {
|
||||
"com.example.key1": "value1"
|
||||
}
|
||||
}`
|
||||
)
|
||||
|
||||
type abstractorTestSuite struct {
|
||||
@ -267,6 +355,67 @@ func (a *abstractorTestSuite) TestAbstractMetadataOfV2Manifest() {
|
||||
a.Equal("value1", artifact.Annotations["com.example.key1"])
|
||||
}
|
||||
|
||||
// oci-spec v1
|
||||
func (a *abstractorTestSuite) TestAbstractMetadataOfOCIManifest() {
|
||||
manifest, _, err := distribution.UnmarshalManifest(v1.MediaTypeImageManifest, []byte(OCIManifest))
|
||||
a.Require().Nil(err)
|
||||
a.regCli.On("PullManifest", mock.Anything, mock.Anything).Return(manifest, "", nil)
|
||||
artifact := &artifact.Artifact{
|
||||
ID: 1,
|
||||
}
|
||||
a.processor.On("AbstractMetadata", mock.Anything, mock.Anything, mock.Anything).Return(nil)
|
||||
err = a.abstractor.AbstractMetadata(context.TODO(), artifact)
|
||||
a.Require().Nil(err)
|
||||
a.Assert().Equal(int64(1), artifact.ID)
|
||||
a.Assert().Equal(v1.MediaTypeImageManifest, artifact.ManifestMediaType)
|
||||
a.Assert().Equal("application/vnd.example.config.v1+json", artifact.MediaType)
|
||||
a.Assert().Equal("application/vnd.example.config.v1+json", artifact.ArtifactType)
|
||||
a.Assert().Equal(int64(1916), artifact.Size)
|
||||
a.Require().Len(artifact.Annotations, 1)
|
||||
a.Equal("value1", artifact.Annotations["com.example.key1"])
|
||||
}
|
||||
|
||||
// oci-spec v1.1.0 with artifactType
|
||||
func (a *abstractorTestSuite) TestAbstractMetadataOfOCIManifestWithArtifactType() {
|
||||
manifest, _, err := distribution.UnmarshalManifest(v1.MediaTypeImageManifest, []byte(OCIManifestWithArtifactType))
|
||||
a.Require().Nil(err)
|
||||
a.regCli.On("PullManifest", mock.Anything, mock.Anything).Return(manifest, "", nil)
|
||||
artifact := &artifact.Artifact{
|
||||
ID: 1,
|
||||
}
|
||||
a.processor.On("AbstractMetadata", mock.Anything, mock.Anything, mock.Anything).Return(nil)
|
||||
err = a.abstractor.AbstractMetadata(context.TODO(), artifact)
|
||||
a.Require().Nil(err)
|
||||
a.Assert().Equal(int64(1), artifact.ID)
|
||||
a.Assert().Equal(v1.MediaTypeImageManifest, artifact.ManifestMediaType)
|
||||
a.Assert().Equal("application/vnd.example.config.v1+json", artifact.MediaType)
|
||||
a.Assert().Equal("application/vnd.example+type", artifact.ArtifactType)
|
||||
a.Assert().Equal(int64(1966), artifact.Size)
|
||||
a.Require().Len(artifact.Annotations, 1)
|
||||
a.Equal("value1", artifact.Annotations["com.example.key1"])
|
||||
}
|
||||
|
||||
// empty config with artifactType
|
||||
func (a *abstractorTestSuite) TestAbstractMetadataOfV2ManifestWithEmptyConfig() {
|
||||
// v1.MediaTypeImageManifest
|
||||
manifest, _, err := distribution.UnmarshalManifest(v1.MediaTypeImageManifest, []byte(OCIManifestWithEmptyConfig))
|
||||
a.Require().Nil(err)
|
||||
a.regCli.On("PullManifest", mock.Anything, mock.Anything).Return(manifest, "", nil)
|
||||
artifact := &artifact.Artifact{
|
||||
ID: 1,
|
||||
}
|
||||
a.processor.On("AbstractMetadata", mock.Anything, mock.Anything, mock.Anything).Return(nil)
|
||||
err = a.abstractor.AbstractMetadata(context.TODO(), artifact)
|
||||
a.Require().Nil(err)
|
||||
a.Assert().Equal(int64(1), artifact.ID)
|
||||
a.Assert().Equal(v1.MediaTypeImageManifest, artifact.ManifestMediaType)
|
||||
a.Assert().Equal(v1.MediaTypeEmptyJSON, artifact.MediaType)
|
||||
a.Assert().Equal("application/vnd.example+type", artifact.ArtifactType)
|
||||
a.Assert().Equal(int64(1880), artifact.Size)
|
||||
a.Require().Len(artifact.Annotations, 2)
|
||||
a.Equal("payload", artifact.Annotations["com.example.data"])
|
||||
}
|
||||
|
||||
// OCI index
|
||||
func (a *abstractorTestSuite) TestAbstractMetadataOfIndex() {
|
||||
manifest, _, err := distribution.UnmarshalManifest(v1.MediaTypeImageIndex, []byte(index))
|
||||
@ -279,17 +428,41 @@ func (a *abstractorTestSuite) TestAbstractMetadataOfIndex() {
|
||||
artifact := &artifact.Artifact{
|
||||
ID: 1,
|
||||
}
|
||||
err = a.abstractor.AbstractMetadata(nil, artifact)
|
||||
err = a.abstractor.AbstractMetadata(context.TODO(), artifact)
|
||||
a.Require().Nil(err)
|
||||
a.Assert().Equal(int64(1), artifact.ID)
|
||||
a.Assert().Equal(v1.MediaTypeImageIndex, artifact.ManifestMediaType)
|
||||
a.Assert().Equal(v1.MediaTypeImageIndex, artifact.MediaType)
|
||||
a.Assert().Equal("", artifact.ArtifactType)
|
||||
a.Assert().Equal(int64(668), artifact.Size)
|
||||
a.Require().Len(artifact.Annotations, 1)
|
||||
a.Assert().Equal("value1", artifact.Annotations["com.example.key1"])
|
||||
a.Len(artifact.References, 2)
|
||||
}
|
||||
|
||||
func (a *abstractorTestSuite) TestAbstractMetadataOfIndexWithArtifactType() {
|
||||
manifest, _, err := distribution.UnmarshalManifest(v1.MediaTypeImageIndex, []byte(indexWithArtifactType))
|
||||
a.Require().Nil(err)
|
||||
a.regCli.On("PullManifest", mock.Anything, mock.Anything).Return(manifest, "", nil)
|
||||
a.argMgr.On("GetByDigest", mock.Anything, mock.Anything, mock.Anything).Return(&artifact.Artifact{
|
||||
ID: 2,
|
||||
Size: 10,
|
||||
}, nil)
|
||||
artifact := &artifact.Artifact{
|
||||
ID: 1,
|
||||
}
|
||||
err = a.abstractor.AbstractMetadata(context.TODO(), artifact)
|
||||
a.Require().Nil(err)
|
||||
a.Assert().Equal(int64(1), artifact.ID)
|
||||
a.Assert().Equal(v1.MediaTypeImageIndex, artifact.ManifestMediaType)
|
||||
a.Assert().Equal(v1.MediaTypeImageIndex, artifact.MediaType)
|
||||
a.Assert().Equal("application/vnd.food.stand", artifact.ArtifactType)
|
||||
a.Assert().Equal(int64(801), artifact.Size)
|
||||
a.Require().Len(artifact.Annotations, 1)
|
||||
a.Assert().Equal("value1", artifact.Annotations["com.example.key1"])
|
||||
a.Len(artifact.References, 2)
|
||||
}
|
||||
|
||||
type unknownManifest struct{}
|
||||
|
||||
func (u *unknownManifest) References() []distribution.Descriptor {
|
||||
|
@ -92,6 +92,7 @@ func parseV1alpha1Icon(artifact *artifact.Artifact, manifest *v1.Manifest, reg r
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer icon.Close()
|
||||
// ensure the icon size is <= 1MB
|
||||
data, err := io.ReadAll(io.LimitReader(icon, 1<<20))
|
||||
if err != nil {
|
||||
|
@ -29,6 +29,7 @@ import (
|
||||
"github.com/goharbor/harbor/src/controller/artifact/processor/chart"
|
||||
"github.com/goharbor/harbor/src/controller/artifact/processor/cnab"
|
||||
"github.com/goharbor/harbor/src/controller/artifact/processor/image"
|
||||
"github.com/goharbor/harbor/src/controller/artifact/processor/sbom"
|
||||
"github.com/goharbor/harbor/src/controller/artifact/processor/wasm"
|
||||
"github.com/goharbor/harbor/src/controller/event/metadata"
|
||||
"github.com/goharbor/harbor/src/controller/tag"
|
||||
@ -57,7 +58,10 @@ import (
|
||||
|
||||
var (
|
||||
// Ctl is a global artifact controller instance
|
||||
Ctl = NewController()
|
||||
Ctl = NewController()
|
||||
skippedContentTypes = map[string]struct{}{
|
||||
"application/vnd.in-toto+json": {},
|
||||
}
|
||||
)
|
||||
|
||||
var (
|
||||
@ -73,6 +77,7 @@ var (
|
||||
chart.ArtifactTypeChart: icon.DigestOfIconChart,
|
||||
cnab.ArtifactTypeCNAB: icon.DigestOfIconCNAB,
|
||||
wasm.ArtifactTypeWASM: icon.DigestOfIconWASM,
|
||||
sbom.ArtifactTypeSBOM: icon.DigestOfIconAccSBOM,
|
||||
}
|
||||
)
|
||||
|
||||
@ -111,6 +116,8 @@ type Controller interface {
|
||||
RemoveLabel(ctx context.Context, artifactID int64, labelID int64) (err error)
|
||||
// Walk walks the artifact tree rooted at root, calling walkFn for each artifact in the tree, including root.
|
||||
Walk(ctx context.Context, root *Artifact, walkFn func(*Artifact) error, option *Option) error
|
||||
// HasUnscannableLayer check artifact with digest if has unscannable layer
|
||||
HasUnscannableLayer(ctx context.Context, dgst string) (bool, error)
|
||||
}
|
||||
|
||||
// NewController creates an instance of the default artifact controller
|
||||
@ -324,12 +331,6 @@ func (c *controller) deleteDeeply(ctx context.Context, id int64, isRoot, isAcces
|
||||
return err
|
||||
}
|
||||
|
||||
if isAccessory {
|
||||
if err := c.accessoryMgr.DeleteAccessories(ctx, q.New(q.KeyWords{"ArtifactID": art.ID, "Digest": art.Digest})); err != nil && !errors.IsErr(err, errors.NotFoundCode) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// the child artifact is referenced by some tags, skip
|
||||
if !isRoot && len(art.Tags) > 0 {
|
||||
return nil
|
||||
@ -352,11 +353,26 @@ func (c *controller) deleteDeeply(ctx context.Context, id int64, isRoot, isAcces
|
||||
return nil
|
||||
}
|
||||
|
||||
if isAccessory {
|
||||
if err := c.accessoryMgr.DeleteAccessories(ctx, q.New(q.KeyWords{"ArtifactID": art.ID, "Digest": art.Digest})); err != nil && !errors.IsErr(err, errors.NotFoundCode) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// delete accessories if contains any
|
||||
for _, acc := range art.Accessories {
|
||||
// only hard ref accessory should be removed
|
||||
if acc.IsHard() {
|
||||
if err = c.deleteDeeply(ctx, acc.GetData().ArtifactID, true, true); err != nil {
|
||||
// if this accessory artifact has a parent (is a child), set isRoot to false
|
||||
parents, err := c.artMgr.ListReferences(ctx, &q.Query{
|
||||
Keywords: map[string]interface{}{
|
||||
"ChildID": acc.GetData().ArtifactID,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = c.deleteDeeply(ctx, acc.GetData().ArtifactID, len(parents) == 0, true); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -369,7 +385,12 @@ func (c *controller) deleteDeeply(ctx context.Context, id int64, isRoot, isAcces
|
||||
!errors.IsErr(err, errors.NotFoundCode) {
|
||||
return err
|
||||
}
|
||||
if err = c.deleteDeeply(ctx, reference.ChildID, false, false); err != nil {
|
||||
// if the child artifact is an accessory, set isAccessory to true
|
||||
accs, err := c.accessoryMgr.List(ctx, q.New(q.KeyWords{"ArtifactID": reference.ChildID}))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = c.deleteDeeply(ctx, reference.ChildID, false, len(accs) > 0); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -743,3 +764,21 @@ func (c *controller) populateAccessories(ctx context.Context, art *Artifact) {
|
||||
}
|
||||
art.Accessories = accs
|
||||
}
|
||||
|
||||
// HasUnscannableLayer checks if it is an in-toto sbom: if it contains any blob whose content_type is application/vnd.in-toto+json, it is considered an in-toto sbom
|
||||
func (c *controller) HasUnscannableLayer(ctx context.Context, dgst string) (bool, error) {
|
||||
if len(dgst) == 0 {
|
||||
return false, nil
|
||||
}
|
||||
blobs, err := c.blobMgr.GetByArt(ctx, dgst)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
for _, b := range blobs {
|
||||
if _, exist := skippedContentTypes[b.ContentType]; exist {
|
||||
log.Debugf("the artifact with digest %v is unscannable, because it contains content type: %v", dgst, b.ContentType)
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
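A caller-side sketch of how HasUnscannableLayer is meant to gate scanning; the shouldScan helper and its wiring are illustrative, not part of this commit:

package scanflow

import (
	"context"

	"github.com/goharbor/harbor/src/controller/artifact"
)

// shouldScan reports whether the artifact with the given digest can be sent
// to the scanner; artifacts containing in-toto layers are skipped.
func shouldScan(ctx context.Context, ctl artifact.Controller, dgst string) (bool, error) {
	unscannable, err := ctl.HasUnscannableLayer(ctx, dgst)
	if err != nil {
		return false, err
	}
	return !unscannable, nil
}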
@ -35,6 +35,7 @@ import (
|
||||
accessorymodel "github.com/goharbor/harbor/src/pkg/accessory/model"
|
||||
basemodel "github.com/goharbor/harbor/src/pkg/accessory/model/base"
|
||||
"github.com/goharbor/harbor/src/pkg/artifact"
|
||||
"github.com/goharbor/harbor/src/pkg/blob/models"
|
||||
"github.com/goharbor/harbor/src/pkg/label/model"
|
||||
repomodel "github.com/goharbor/harbor/src/pkg/repository/model"
|
||||
model_tag "github.com/goharbor/harbor/src/pkg/tag/model/tag"
|
||||
@ -678,6 +679,29 @@ func (c *controllerTestSuite) TestWalk() {
|
||||
}
|
||||
}
|
||||
|
||||
func (c *controllerTestSuite) TestIsIntoto() {
|
||||
blobs := []*models.Blob{
|
||||
{Digest: "sha256:00000", ContentType: "application/vnd.oci.image.manifest.v1+json"},
|
||||
{Digest: "sha256:22222", ContentType: "application/vnd.oci.image.config.v1+json"},
|
||||
{Digest: "sha256:11111", ContentType: "application/vnd.in-toto+json"},
|
||||
}
|
||||
c.blobMgr.On("GetByArt", mock.Anything, mock.Anything).Return(blobs, nil).Once()
|
||||
isIntoto, err := c.ctl.HasUnscannableLayer(context.Background(), "sha256: 77777")
|
||||
c.Nil(err)
|
||||
c.True(isIntoto)
|
||||
|
||||
blobs2 := []*models.Blob{
|
||||
{Digest: "sha256:00000", ContentType: "application/vnd.oci.image.manifest.v1+json"},
|
||||
{Digest: "sha256:22222", ContentType: "application/vnd.oci.image.config.v1+json"},
|
||||
{Digest: "sha256:11111", ContentType: "application/vnd.oci.image.layer.v1.tar+gzip"},
|
||||
}
|
||||
|
||||
c.blobMgr.On("GetByArt", mock.Anything, mock.Anything).Return(blobs2, nil).Once()
|
||||
isIntoto2, err := c.ctl.HasUnscannableLayer(context.Background(), "sha256: 8888")
|
||||
c.Nil(err)
|
||||
c.False(isIntoto2)
|
||||
}
|
||||
|
||||
func TestControllerTestSuite(t *testing.T) {
|
||||
suite.Run(t, &controllerTestSuite{})
|
||||
}
|
||||
|
@ -80,6 +80,20 @@ func (artifact *Artifact) SetAdditionLink(addition, version string) {
|
||||
artifact.AdditionLinks[addition] = &AdditionLink{HREF: href, Absolute: false}
|
||||
}
|
||||
|
||||
// SetSBOMAdditionLink set the link of SBOM addition
|
||||
func (artifact *Artifact) SetSBOMAdditionLink(sbomDgst string, version string) {
|
||||
if artifact.AdditionLinks == nil {
|
||||
artifact.AdditionLinks = make(map[string]*AdditionLink)
|
||||
}
|
||||
addition := "sboms"
|
||||
projectName, repo := utils.ParseRepository(artifact.RepositoryName)
|
||||
// encode slash as %252F
|
||||
repo = repository.Encode(repo)
|
||||
href := fmt.Sprintf("/api/%s/projects/%s/repositories/%s/artifacts/%s/additions/sbom", version, projectName, repo, sbomDgst)
|
||||
|
||||
artifact.AdditionLinks[addition] = &AdditionLink{HREF: href, Absolute: false}
|
||||
}
|
||||
|
||||
// AdditionLink is a link via that the addition can be fetched
|
||||
type AdditionLink struct {
|
||||
HREF string `json:"href"`
|
||||
|
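For reference, a sketch of the link SetSBOMAdditionLink produces, with made-up values (this snippet assumes it sits in the same package as the Artifact model; the repository is split into project and repo, and any slash in the repo part is double-encoded):

func exampleSBOMLink() string {
	art := &Artifact{}
	art.RepositoryName = "library/nginx"
	art.SetSBOMAdditionLink("sha256:abc123", "v2.0")
	// HREF: /api/v2.0/projects/library/repositories/nginx/artifacts/sha256:abc123/additions/sbom
	return art.AdditionLinks["sboms"].HREF
}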
@ -85,11 +85,11 @@ func (p *processor) AbstractAddition(_ context.Context, artifact *artifact.Artif
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer blob.Close()
|
||||
content, err := io.ReadAll(blob)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blob.Close()
|
||||
chartDetails, err := p.chartOperator.GetDetails(content)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -117,7 +117,31 @@ var (
|
||||
}
|
||||
]
|
||||
}`
|
||||
v2ManifestWithUnknownConfig = `{
|
||||
OCIManifestWithUnknownJsonConfig = `{
|
||||
"schemaVersion": 2,
|
||||
"mediaType": "application/vnd.oci.image.manifest.v1+json",
|
||||
"config": {
|
||||
"mediaType": "application/vnd.exmaple.config.v1+json",
|
||||
"digest": "sha256:48ef4a53c0770222d9752cd0588431dbda54667046208c79804e34c15c1579cd",
|
||||
"size": 129
|
||||
},
|
||||
"layers": [
|
||||
{
|
||||
"mediaType": "application/vnd.example.data.v1.tar+gzip",
|
||||
"digest": "sha256:e258d248fda94c63753607f7c4494ee0fcbe92f1a76bfdac795c9d84101eb317",
|
||||
"size": 1234
|
||||
}
|
||||
],
|
||||
"annotations": {
|
||||
"com.example.key1": "value1"
|
||||
}
|
||||
}`
|
||||
UnknownJsonConfig = `{
|
||||
"author": "yminer",
|
||||
"architecture": "amd64",
|
||||
"selfdefined": "true"
|
||||
}`
|
||||
OCIManifestWithUnknownConfig = `{
|
||||
"schemaVersion": 2,
|
||||
"mediaType": "application/vnd.oci.image.manifest.v1+json",
|
||||
"config": {
|
||||
@ -141,7 +165,30 @@ var (
|
||||
"newUnspecifiedField": null
|
||||
}
|
||||
}`
|
||||
unknownConfig = `{NHL Peanut Butter on my NHL bagel}`
|
||||
UnknownConfig = `{NHL Peanut Butter on my NHL bagel}`
|
||||
|
||||
OCIManifestWithEmptyConfig = `{
|
||||
"schemaVersion": 2,
|
||||
"mediaType": "application/vnd.oci.image.manifest.v1+json",
|
||||
"artifactType": "application/vnd.example+type",
|
||||
"config": {
|
||||
"mediaType": "application/vnd.oci.empty.v1+json",
|
||||
"digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
|
||||
"size": 2
|
||||
},
|
||||
"layers": [
|
||||
{
|
||||
"mediaType": "application/vnd.example+type",
|
||||
"digest": "sha256:e258d248fda94c63753607f7c4494ee0fcbe92f1a76bfdac795c9d84101eb317",
|
||||
"size": 1234
|
||||
}
|
||||
],
|
||||
"annotations": {
|
||||
"oci.opencontainers.image.created": "2023-01-02T03:04:05Z",
|
||||
"com.example.data": "payload"
|
||||
}
|
||||
}`
|
||||
emptyConfig = `{}`
|
||||
)
|
||||
|
||||
type defaultProcessorTestSuite struct {
|
||||
@ -190,6 +237,12 @@ func (d *defaultProcessorTestSuite) TestGetArtifactType() {
    typee = processor.GetArtifactType(nil, art)
    d.Equal("IMAGE", typee)

    mediaType = "application/vnd.example.config.v1+json"
    art = &artifact.Artifact{MediaType: mediaType}
    processor = &defaultProcessor{}
    typee = processor.GetArtifactType(nil, art)
    d.Equal(ArtifactTypeUnknown, typee)

    mediaType = "application/vnd.cncf.helm.chart.config.v1+json"
    art = &artifact.Artifact{MediaType: mediaType}
    processor = &defaultProcessor{}
@ -229,19 +282,53 @@ func (d *defaultProcessorTestSuite) TestAbstractMetadata() {
    d.Len(art.ExtraAttrs, 12)
}

func (d *defaultProcessorTestSuite) TestAbstractMetadataWithUnknownConfig() {
    manifest, _, err := distribution.UnmarshalManifest(v1.MediaTypeImageManifest, []byte(v2ManifestWithUnknownConfig))
func (d *defaultProcessorTestSuite) TestAbstractMetadataOfOCIManifestWithUnknownJsonConfig() {
    manifest, _, err := distribution.UnmarshalManifest(v1.MediaTypeImageManifest, []byte(OCIManifestWithUnknownJsonConfig))
    d.Require().Nil(err)
    manifestMediaType, content, err := manifest.Payload()
    d.Require().Nil(err)

    configBlob := io.NopCloser(strings.NewReader(unknownConfig))
    d.regCli.On("PullBlob", mock.Anything, mock.Anything).Return(int64(0), configBlob, nil)
    art := &artifact.Artifact{ManifestMediaType: manifestMediaType}
    err = d.processor.AbstractMetadata(nil, art, content)
    configBlob := io.NopCloser(strings.NewReader(UnknownJsonConfig))
    metadata := map[string]interface{}{}
    err = json.NewDecoder(configBlob).Decode(&metadata)
    d.Require().Nil(err)

    art := &artifact.Artifact{ManifestMediaType: manifestMediaType, MediaType: "application/vnd.example.config.v1+json"}

    d.regCli.On("PullBlob", mock.Anything, mock.Anything).Return(int64(129), configBlob, nil)
    d.parser.On("Parse", context.TODO(), mock.AnythingOfType("*artifact.Artifact"), mock.AnythingOfType("[]byte")).Return(nil)
    err = d.processor.AbstractMetadata(context.TODO(), art, content)
    d.Require().Nil(err)
    d.Len(art.ExtraAttrs, 0)
    d.Len(unknownConfig, 35)
    d.NotEqual(art.ExtraAttrs, len(metadata))
}

func (d *defaultProcessorTestSuite) TestAbstractMetadataWithUnknownConfig() {
    manifest, _, err := distribution.UnmarshalManifest(v1.MediaTypeImageManifest, []byte(OCIManifestWithUnknownConfig))
    d.Require().Nil(err)
    manifestMediaType, content, err := manifest.Payload()
    d.Require().Nil(err)

    configBlob := io.NopCloser(strings.NewReader(UnknownConfig))
    d.regCli.On("PullBlob", mock.Anything, mock.Anything).Return(int64(0), configBlob, nil)
    art := &artifact.Artifact{ManifestMediaType: manifestMediaType, MediaType: "application/vnd.nhl.peanut.butter.bagel"}
    err = d.processor.AbstractMetadata(context.TODO(), art, content)
    d.Require().Nil(err)
    d.Len(art.ExtraAttrs, 0)
}

func (d *defaultProcessorTestSuite) TestAbstractMetadataWithEmptyConfig() {
    manifest, _, err := distribution.UnmarshalManifest(v1.MediaTypeImageManifest, []byte(OCIManifestWithEmptyConfig))
    d.Require().Nil(err)
    manifestMediaType, content, err := manifest.Payload()
    d.Require().Nil(err)

    art := &artifact.Artifact{ManifestMediaType: manifestMediaType, MediaType: "application/vnd.oci.empty.v1+json"}
    err = d.processor.AbstractMetadata(context.TODO(), art, content)
    d.Assert().Equal(0, len(art.ExtraAttrs))
    d.Assert().Equal(2, len(emptyConfig))
    d.Require().Nil(err)
}

func TestDefaultProcessorTestSuite(t *testing.T) {
89
src/controller/artifact/processor/sbom/sbom.go
Normal file
89
src/controller/artifact/processor/sbom/sbom.go
Normal file
@ -0,0 +1,89 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package sbom

import (
    "context"
    "encoding/json"
    "io"

    v1 "github.com/opencontainers/image-spec/specs-go/v1"

    "github.com/goharbor/harbor/src/controller/artifact/processor"
    "github.com/goharbor/harbor/src/controller/artifact/processor/base"
    "github.com/goharbor/harbor/src/lib/errors"
    "github.com/goharbor/harbor/src/lib/log"
    "github.com/goharbor/harbor/src/pkg/artifact"
)

const (
    // ArtifactTypeSBOM is the artifact type for SBOM; its scope is only used in the processor
    ArtifactTypeSBOM = "SBOM"
    // processorMediaType is the media type for SBOM; its scope is only used to register the processor
    processorMediaType = "application/vnd.goharbor.harbor.sbom.v1"
)

func init() {
    pc := &Processor{}
    pc.ManifestProcessor = base.NewManifestProcessor()
    if err := processor.Register(pc, processorMediaType); err != nil {
        log.Errorf("failed to register processor for media type %s: %v", processorMediaType, err)
        return
    }
}

// Processor is the processor for SBOM
type Processor struct {
    *base.ManifestProcessor
}

// AbstractAddition returns the addition for SBOM
func (m *Processor) AbstractAddition(_ context.Context, art *artifact.Artifact, _ string) (*processor.Addition, error) {
    man, _, err := m.RegCli.PullManifest(art.RepositoryName, art.Digest)
    if err != nil {
        return nil, errors.Wrap(err, "failed to pull manifest")
    }
    _, payload, err := man.Payload()
    if err != nil {
        return nil, errors.Wrap(err, "failed to get payload")
    }
    manifest := &v1.Manifest{}
    if err := json.Unmarshal(payload, manifest); err != nil {
        return nil, err
    }
    // an SBOM artifact should only have one layer
    if len(manifest.Layers) != 1 {
        return nil, errors.New(nil).WithCode(errors.NotFoundCode).WithMessage("The sbom is not found")
    }
    layerDgst := manifest.Layers[0].Digest.String()
    _, blob, err := m.RegCli.PullBlob(art.RepositoryName, layerDgst)
    if err != nil {
        return nil, errors.Wrap(err, "failed to pull the blob")
    }
    defer blob.Close()
    content, err := io.ReadAll(blob)
    if err != nil {
        return nil, err
    }
    return &processor.Addition{
        Content:     content,
        ContentType: processorMediaType,
    }, nil
}

// GetArtifactType the artifact type is used to display the artifact type in the UI
func (m *Processor) GetArtifactType(_ context.Context, _ *artifact.Artifact) string {
    return ArtifactTypeSBOM
}
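The init() above registers the processor keyed by its media type, so callers can later resolve the right processor per artifact. A hedged sketch of that register/lookup pattern; the registry variable and helper names are illustrative, not Harbor's exact API inside the processor package:

var registry = map[string]processor.Processor{}

// register refuses duplicate registrations for a media type, mirroring why
// init() checks and logs the error returned by processor.Register.
func register(p processor.Processor, mediaType string) error {
    if _, ok := registry[mediaType]; ok {
        return fmt.Errorf("processor already registered for %s", mediaType)
    }
    registry[mediaType] = p
    return nil
}

func lookup(mediaType string) processor.Processor {
    return registry[mediaType] // callers presumably fall back to a default processor when nil
}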
166
src/controller/artifact/processor/sbom/sbom_test.go
Normal file
166
src/controller/artifact/processor/sbom/sbom_test.go
Normal file
@ -0,0 +1,166 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package sbom

import (
    "context"
    "fmt"
    "io"
    "strings"
    "testing"

    "github.com/docker/distribution"
    v1 "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/stretchr/testify/mock"
    "github.com/stretchr/testify/suite"

    "github.com/goharbor/harbor/src/controller/artifact/processor/base"
    "github.com/goharbor/harbor/src/lib/errors"
    "github.com/goharbor/harbor/src/pkg/artifact"
    "github.com/goharbor/harbor/src/testing/pkg/registry"
)

type SBOMProcessorTestSuite struct {
    suite.Suite
    processor *Processor
    regCli    *registry.Client
}

func (suite *SBOMProcessorTestSuite) SetupSuite() {
    suite.regCli = &registry.Client{}
    suite.processor = &Processor{
        &base.ManifestProcessor{
            RegCli: suite.regCli,
        },
    }
}

func (suite *SBOMProcessorTestSuite) TearDownSuite() {
}

func (suite *SBOMProcessorTestSuite) TestAbstractAdditionNormal() {
    manContent := `{
        "schemaVersion": 2,
        "config": {
            "mediaType": "application/vnd.oci.image.config.v1+json",
            "digest": "sha256:e91b9dfcbbb3b88bac94726f276b89de46e4460b55f6e6d6f876e666b150ec5b",
            "size": 498
        },
        "layers": [
            {
                "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
                "size": 32654,
                "digest": "sha256:abc"
            }]
    }`
    sbomContent := "this is a sbom content"
    reader := strings.NewReader(sbomContent)
    blobReader := io.NopCloser(reader)
    mani, _, err := distribution.UnmarshalManifest(v1.MediaTypeImageManifest, []byte(manContent))
    suite.Require().NoError(err)
    suite.regCli.On("PullManifest", mock.Anything, mock.Anything).Return(mani, "sha256:123", nil).Once()
    suite.regCli.On("PullBlob", mock.Anything, mock.Anything).Return(int64(123), blobReader, nil).Once()
    addition, err := suite.processor.AbstractAddition(context.Background(), &artifact.Artifact{RepositoryName: "repo", Digest: "digest"}, "sbom")
    suite.Nil(err)
    suite.Equal(sbomContent, string(addition.Content))
}

func (suite *SBOMProcessorTestSuite) TestAbstractAdditionMultiLayer() {
    manContent := `{
        "schemaVersion": 2,
        "config": {
            "mediaType": "application/vnd.oci.image.config.v1+json",
            "digest": "sha256:e91b9dfcbbb3b88bac94726f276b89de46e4460b55f6e6d6f876e666b150ec5b",
            "size": 498
        },
        "layers": [
            {
                "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
                "size": 32654,
                "digest": "sha256:abc"
            },
            {
                "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
                "size": 843,
                "digest": "sha256:def"
            },
            {
                "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
                "size": 531,
                "digest": "sha256:123"
            }
        ]
    }`
    mani, _, err := distribution.UnmarshalManifest(v1.MediaTypeImageManifest, []byte(manContent))
    suite.Require().NoError(err)
    suite.regCli.On("PullManifest", mock.Anything, mock.Anything).Return(mani, "sha256:123", nil).Once()
    _, err = suite.processor.AbstractAddition(context.Background(), &artifact.Artifact{RepositoryName: "repo", Digest: "digest"}, "sbom")
    suite.NotNil(err)
}

func (suite *SBOMProcessorTestSuite) TestAbstractAdditionPullBlobError() {
    manContent := `{
        "schemaVersion": 2,
        "config": {
            "mediaType": "application/vnd.oci.image.config.v1+json",
            "digest": "sha256:e91b9dfcbbb3b88bac94726f276b89de46e4460b55f6e6d6f876e666b150ec5b",
            "size": 498
        },
        "layers": [
            {
                "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
                "size": 32654,
                "digest": "sha256:abc"
            }
        ]
    }`
    mani, _, err := distribution.UnmarshalManifest(v1.MediaTypeImageManifest, []byte(manContent))
    suite.Require().NoError(err)
    suite.regCli.On("PullManifest", mock.Anything, mock.Anything).Return(mani, "sha256:123", nil).Once()
    suite.regCli.On("PullBlob", mock.Anything, mock.Anything).Return(int64(123), nil, errors.NotFoundError(fmt.Errorf("not found"))).Once()
    addition, err := suite.processor.AbstractAddition(context.Background(), &artifact.Artifact{RepositoryName: "repo", Digest: "digest"}, "sbom")
    suite.NotNil(err)
    suite.Nil(addition)
}

func (suite *SBOMProcessorTestSuite) TestAbstractAdditionNoSBOMLayer() {
    manContent := `{
        "schemaVersion": 2,
        "config": {
            "mediaType": "application/vnd.oci.image.config.v1+json",
            "digest": "sha256:e91b9dfcbbb3b88bac94726f276b89de46e4460b55f6e6d6f876e666b150ec5b",
            "size": 498
        }
    }`
    mani, _, err := distribution.UnmarshalManifest(v1.MediaTypeImageManifest, []byte(manContent))
    suite.Require().NoError(err)
    suite.regCli.On("PullManifest", mock.Anything, mock.Anything).Return(mani, "sha256:123", nil).Once()
    _, err = suite.processor.AbstractAddition(context.Background(), &artifact.Artifact{RepositoryName: "repo", Digest: "digest"}, "sbom")
    suite.NotNil(err)
}

func (suite *SBOMProcessorTestSuite) TestAbstractAdditionPullManifestError() {
    suite.regCli.On("PullManifest", mock.Anything, mock.Anything).Return(nil, "sha256:123", errors.NotFoundError(fmt.Errorf("not found"))).Once()
    _, err := suite.processor.AbstractAddition(context.Background(), &artifact.Artifact{RepositoryName: "repo", Digest: "digest"}, "sbom")
    suite.NotNil(err)
}

func (suite *SBOMProcessorTestSuite) TestGetArtifactType() {
    suite.Equal(ArtifactTypeSBOM, suite.processor.GetArtifactType(context.Background(), &artifact.Artifact{}))
}

func TestSBOMProcessorTestSuite(t *testing.T) {
    suite.Run(t, &SBOMProcessorTestSuite{})
}
@ -24,6 +24,7 @@ import (
    "time"

    "github.com/goharbor/harbor/src/controller/artifact"
    sbomprocessor "github.com/goharbor/harbor/src/controller/artifact/processor/sbom"
    "github.com/goharbor/harbor/src/controller/event"
    "github.com/goharbor/harbor/src/controller/event/operator"
    "github.com/goharbor/harbor/src/controller/repository"
@ -36,6 +37,8 @@ import (
    "github.com/goharbor/harbor/src/pkg"
    pkgArt "github.com/goharbor/harbor/src/pkg/artifact"
    "github.com/goharbor/harbor/src/pkg/scan/report"
    v1 "github.com/goharbor/harbor/src/pkg/scan/rest/v1"
    "github.com/goharbor/harbor/src/pkg/scan/sbom"
    "github.com/goharbor/harbor/src/pkg/task"
)

@ -72,6 +75,8 @@ type ArtifactEventHandler struct {
    execMgr task.ExecutionManager
    // reportMgr for managing scan reports
    reportMgr report.Manager
    // sbomReportMgr for managing SBOM reports
    sbomReportMgr sbom.Manager
    // artMgr for managing artifacts
    artMgr pkgArt.Manager

@ -258,6 +263,11 @@ func (a *ArtifactEventHandler) onPush(ctx context.Context, event *event.Artifact
        if err := autoScan(ctx, &artifact.Artifact{Artifact: *event.Artifact}, event.Tags...); err != nil {
            log.Errorf("scan artifact %s@%s failed, error: %v", event.Artifact.RepositoryName, event.Artifact.Digest, err)
        }

        log.Debugf("auto generate sbom is triggered for artifact event %+v", event)
        if err := autoGenSBOM(ctx, &artifact.Artifact{Artifact: *event.Artifact}); err != nil {
            log.Errorf("generate sbom for artifact %s@%s failed, error: %v", event.Artifact.RepositoryName, event.Artifact.Digest, err)
        }
    }()

    return nil
@ -314,6 +324,17 @@ func (a *ArtifactEventHandler) onDelete(ctx context.Context, event *event.Artifa
        log.Errorf("failed to delete scan reports of artifact %v, error: %v", unrefDigests, err)
    }

    // delete the sbom_report when the subject artifact is deleted
    if err := sbom.Mgr.DeleteByArtifactID(ctx, event.Artifact.ID); err != nil {
        log.Errorf("failed to delete sbom reports of artifact ID %v, error: %v", event.Artifact.ID, err)
    }

    // delete the sbom_report when the accessory artifact is deleted
    if event.Artifact.Type == sbomprocessor.ArtifactTypeSBOM && len(event.Artifact.Digest) > 0 {
        if err := sbom.Mgr.DeleteByExtraAttr(ctx, v1.MimeTypeSBOMReport, "sbom_digest", event.Artifact.Digest); err != nil {
            log.Errorf("failed to delete sbom reports with sbom digest %v, error: %v", event.Artifact.Digest, err)
        }
    }
    return nil
}

@ -20,7 +20,9 @@ import (
    "github.com/goharbor/harbor/src/controller/artifact"
    "github.com/goharbor/harbor/src/controller/project"
    "github.com/goharbor/harbor/src/controller/scan"
    "github.com/goharbor/harbor/src/lib/log"
    "github.com/goharbor/harbor/src/lib/orm"
    v1 "github.com/goharbor/harbor/src/pkg/scan/rest/v1"
)

// autoScan scans the artifact when the project of the artifact enables auto scan
@ -37,9 +39,26 @@ func autoScan(ctx context.Context, a *artifact.Artifact, tags ...string) error {
    return orm.WithTransaction(func(ctx context.Context) error {
        options := []scan.Option{}
        if len(tags) > 0 {
            options = append(options, scan.WithTag(tags[0]))
            options = append(options, scan.WithTag(tags[0]), scan.WithFromEvent(true))
        }

        return scan.DefaultController.Scan(ctx, a, options...)
    })(orm.SetTransactionOpNameToContext(ctx, "tx-auto-scan"))
}

// autoGenSBOM generates an SBOM when the project of the artifact enables auto SBOM generation
func autoGenSBOM(ctx context.Context, a *artifact.Artifact) error {
    proj, err := project.Ctl.Get(ctx, a.ProjectID)
    if err != nil {
        return err
    }
    if !proj.AutoSBOMGen() {
        return nil
    }
    // transaction here to work with the image index
    return orm.WithTransaction(func(ctx context.Context) error {
        options := []scan.Option{}
        options = append(options, scan.WithScanType(v1.ScanTypeSbom), scan.WithFromEvent(true))
        log.Debugf("sbom scan controller artifact %+v, options %+v", a, options)
        return scan.DefaultController.Scan(ctx, a, options...)
    })(orm.SetTransactionOpNameToContext(ctx, "tx-auto-gen-sbom"))
}
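The scan.Option values passed above (WithTag, WithScanType, WithFromEvent) follow Go's functional-options idiom, which the later parseOptions call in the scan controller consumes. A hedged sketch of how these plausibly fit together; the Options struct and its fields are assumptions, only the option constructors and parseOptions are named in this diff:

// Options collects per-scan settings; each Option mutates it.
type Options struct {
    Tag       string
    ScanType  string
    FromEvent bool // suppress "unsupported" errors for event-driven scans
}

type Option func(*Options) error

func WithScanType(t string) Option {
    return func(o *Options) error { o.ScanType = t; return nil }
}

func WithFromEvent(fromEvent bool) Option {
    return func(o *Options) error { o.FromEvent = fromEvent; return nil }
}

func parseOptions(opts ...Option) (*Options, error) {
    o := &Options{}
    for _, opt := range opts {
        if err := opt(o); err != nil {
            return nil, err
        }
    }
    return o, nil
}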
@ -95,6 +95,34 @@ func (suite *AutoScanTestSuite) TestAutoScan() {
    suite.Nil(autoScan(ctx, art))
}

func (suite *AutoScanTestSuite) TestAutoScanSBOM() {
    mock.OnAnything(suite.projectController, "Get").Return(&proModels.Project{
        Metadata: map[string]string{
            proModels.ProMetaAutoSBOMGen: "true",
        },
    }, nil)
    suite.scanController.On("Scan", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()
    ctx := orm.NewContext(nil, &ormtesting.FakeOrmer{})
    art := &artifact.Artifact{}

    suite.Nil(autoGenSBOM(ctx, art))
}

func (suite *AutoScanTestSuite) TestAutoScanSBOMFalse() {
    mock.OnAnything(suite.projectController, "Get").Return(&proModels.Project{
        Metadata: map[string]string{
            proModels.ProMetaAutoSBOMGen: "false",
        },
    }, nil)

    suite.scanController.On("Scan", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()

    ctx := orm.NewContext(nil, &ormtesting.FakeOrmer{})
    art := &artifact.Artifact{}

    suite.Nil(autoGenSBOM(ctx, art))
}

func (suite *AutoScanTestSuite) TestAutoScanFailed() {
    mock.OnAnything(suite.projectController, "Get").Return(&proModels.Project{
        Metadata: map[string]string{
@ -216,7 +216,9 @@ func constructReplicationPayload(ctx context.Context, event *event.ReplicationEv

func getMetadataFromResource(resource string) (namespace, nameAndTag string) {
    // Usually the resource format looks like 'library/busybox:v1', but it could be 'busybox:v1' in docker registry
    meta := strings.Split(resource, "/")
    // It could also be 'library/bitnami/fluentd:1.13.3-debian-10-r0', so split the resource into only 2 parts:
    // a possible namespace and an image name that may itself include slashes, e.g. bitnami/fluentd:1.13.3-debian-10-r0
    meta := strings.SplitN(resource, "/", 2)
    if len(meta) == 1 {
        return "", meta[0]
    }
@ -146,3 +146,21 @@ func TestIsLocalRegistry(t *testing.T) {
    }
    assert.False(t, isLocalRegistry(reg2))
}

func TestReplicationHandler_ShortResourceName(t *testing.T) {
    namespace, resource := getMetadataFromResource("busybox:v1")
    assert.Equal(t, "", namespace)
    assert.Equal(t, "busybox:v1", resource)
}

func TestReplicationHandler_NormalResourceName(t *testing.T) {
    namespace, resource := getMetadataFromResource("library/busybox:v1")
    assert.Equal(t, "library", namespace)
    assert.Equal(t, "busybox:v1", resource)
}

func TestReplicationHandler_LongResourceName(t *testing.T) {
    namespace, resource := getMetadataFromResource("library/bitnami/fluentd:1.13.3-debian-10-r0")
    assert.Equal(t, "library", namespace)
    assert.Equal(t, "bitnami/fluentd:1.13.3-debian-10-r0", resource)
}
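The three tests above pin down the split behavior; as a standalone illustration of why SplitN with n=2 is the right call for nested repository names:

package main

import (
    "fmt"
    "strings"
)

func main() {
    // SplitN with n=2 keeps everything after the first slash together,
    // so nested repository names survive intact.
    fmt.Println(strings.SplitN("library/bitnami/fluentd:1.13.3-debian-10-r0", "/", 2))
    // [library bitnami/fluentd:1.13.3-debian-10-r0]
    fmt.Println(strings.SplitN("busybox:v1", "/", 2))
    // [busybox:v1]
}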
@ -21,6 +21,7 @@ import (
    "github.com/goharbor/harbor/src/controller/artifact"
    "github.com/goharbor/harbor/src/controller/event"
    "github.com/goharbor/harbor/src/controller/event/handler/util"
    eventModel "github.com/goharbor/harbor/src/controller/event/model"
    "github.com/goharbor/harbor/src/controller/project"
    "github.com/goharbor/harbor/src/controller/scan"
    "github.com/goharbor/harbor/src/lib/errors"
@ -104,6 +105,9 @@ func constructScanImagePayload(ctx context.Context, event *event.ScanImageEvent,
            RepoFullName: event.Artifact.Repository,
            RepoType:     repoType,
        },
        Scan: &eventModel.Scan{
            ScanType: event.ScanType,
        },
    },
    Operator: event.Operator,
}
@ -138,17 +142,29 @@ func constructScanImagePayload(ctx context.Context, event *event.ScanImageEvent,
        time.Sleep(500 * time.Millisecond)
    }

    // Add scan overview
    summaries, err := scan.DefaultController.GetSummary(ctx, art, []string{v1.MimeTypeNativeReport, v1.MimeTypeGenericVulnerabilityReport})
    if err != nil {
        return nil, errors.Wrap(err, "construct scan payload")
    scanSummaries := map[string]interface{}{}
    if event.ScanType == v1.ScanTypeVulnerability {
        scanSummaries, err = scan.DefaultController.GetSummary(ctx, art, event.ScanType, []string{v1.MimeTypeNativeReport, v1.MimeTypeGenericVulnerabilityReport})
        if err != nil {
            return nil, errors.Wrap(err, "construct scan payload")
        }
    }

    sbomOverview := map[string]interface{}{}
    if event.ScanType == v1.ScanTypeSbom {
        sbomOverview, err = scan.DefaultController.GetSummary(ctx, art, event.ScanType, []string{v1.MimeTypeSBOMReport})
        if err != nil {
            return nil, errors.Wrap(err, "construct scan payload")
        }
    }

    // Add scan overview and sbom overview
    resource := &model.Resource{
        Tag:          event.Artifact.Tag,
        Digest:       event.Artifact.Digest,
        ResourceURL:  resURL,
        ScanOverview: summaries,
        ScanOverview: scanSummaries,
        SBOMOverview: sbomOverview,
    }
    payload.EventData.Resources = append(payload.EventData.Resources, resource)

@ -27,6 +27,7 @@ import (
// ScanImageMetaData defines the metadata of an image scanning event
type ScanImageMetaData struct {
    Artifact *v1.Artifact
    ScanType string
    Status   string
    Operator string
}
@ -55,6 +56,7 @@ func (si *ScanImageMetaData) Resolve(evt *event.Event) error {
    Artifact: si.Artifact,
    OccurAt:  time.Now(),
    Operator: si.Operator,
    ScanType: si.ScanType,
}

evt.Topic = topic
@ -74,3 +74,9 @@ type RetentionRule struct {
    // Selector attached to the rule for filtering scope (e.g: repositories or namespaces)
    ScopeSelectors map[string][]*rule.Selector `json:"scope_selectors,omitempty"`
}

// Scan describes scan info
type Scan struct {
    // ScanType is the scan type
    ScanType string `json:"scan_type,omitempty"`
}
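Since the new Scan model carries a json tag, its serialized form in the webhook payload is easy to check. A small standalone demo (the struct is re-declared here so the snippet compiles on its own; the "sbom" value is illustrative):

package main

import (
    "encoding/json"
    "fmt"
)

// Scan mirrors the model above; re-declared so the snippet is self-contained.
type Scan struct {
    ScanType string `json:"scan_type,omitempty"`
}

func main() {
    b, _ := json.Marshal(Scan{ScanType: "sbom"})
    fmt.Println(string(b)) // {"scan_type":"sbom"}
}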
@ -159,7 +159,7 @@ func (p *PushArtifactEvent) ResolveToAuditLog() (*model.AuditLog, error) {
    ResourceType: "artifact"}

    if len(p.Tags) == 0 {
        auditLog.Resource = fmt.Sprintf("%s:%s",
        auditLog.Resource = fmt.Sprintf("%s@%s",
            p.Artifact.RepositoryName, p.Artifact.Digest)
    } else {
        auditLog.Resource = fmt.Sprintf("%s:%s",
@ -188,7 +188,7 @@ func (p *PullArtifactEvent) ResolveToAuditLog() (*model.AuditLog, error) {
    ResourceType: "artifact"}

    if len(p.Tags) == 0 {
        auditLog.Resource = fmt.Sprintf("%s:%s",
        auditLog.Resource = fmt.Sprintf("%s@%s",
            p.Artifact.RepositoryName, p.Artifact.Digest)
    } else {
        auditLog.Resource = fmt.Sprintf("%s:%s",
@ -222,7 +222,7 @@ func (d *DeleteArtifactEvent) ResolveToAuditLog() (*model.AuditLog, error) {
    Operation:    rbac.ActionDelete.String(),
    Username:     d.Operator,
    ResourceType: "artifact",
    Resource: fmt.Sprintf("%s:%s", d.Artifact.RepositoryName, d.Artifact.Digest)}
    Resource: fmt.Sprintf("%s@%s", d.Artifact.RepositoryName, d.Artifact.Digest)}
    return auditLog, nil
}

@ -289,6 +289,7 @@ func (d *DeleteTagEvent) String() string {
// ScanImageEvent is scanning image related event data to publish
type ScanImageEvent struct {
    EventType string
    ScanType  string
    Artifact  *v1.Artifact
    OccurAt   time.Time
    Operator  string
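The audit-log hunks above switch untagged artifacts from the "repo:digest" form to the OCI "repo@digest" form. A standalone demo of the two reference styles; the repository, digest, and tag values are made up:

package main

import "fmt"

func main() {
    repo, digest, tag := "library/busybox", "sha256:9d3b6c3d", "v1"
    fmt.Printf("%s@%s\n", repo, digest) // library/busybox@sha256:9d3b6c3d (untagged: digest reference)
    fmt.Printf("%s:%s\n", repo, tag)    // library/busybox:v1 (tagged: tag reference)
}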
@ -69,6 +69,10 @@ var (
        path:   "./icons/wasm.png",
        resize: true,
    },
    icon.DigestOfIconAccSBOM: {
        path:   "./icons/sbom.png",
        resize: true,
    },
    icon.DigestOfIconDefault: {
        path:   "./icons/default.png",
        resize: true,
@ -19,6 +19,7 @@ import (
    "fmt"

    "github.com/goharbor/harbor/src/common"
    commonmodels "github.com/goharbor/harbor/src/common/models"
    "github.com/goharbor/harbor/src/core/auth"
    "github.com/goharbor/harbor/src/lib/errors"
    "github.com/goharbor/harbor/src/lib/q"
@ -45,7 +46,7 @@ type Controller interface {
    // Count gets the total amount of project members
    Count(ctx context.Context, projectNameOrID interface{}, query *q.Query) (int, error)
    // IsProjectAdmin judges if the user is a project admin of any project
    IsProjectAdmin(ctx context.Context, memberID int) (bool, error)
    IsProjectAdmin(ctx context.Context, member commonmodels.User) (bool, error)
}

// Request - Project Member Request
@ -261,8 +262,8 @@ func (c *controller) Delete(ctx context.Context, projectNameOrID interface{}, me
    return c.mgr.Delete(ctx, p.ProjectID, memberID)
}

func (c *controller) IsProjectAdmin(ctx context.Context, memberID int) (bool, error) {
    members, err := c.projectMgr.ListAdminRolesOfUser(ctx, memberID)
func (c *controller) IsProjectAdmin(ctx context.Context, member commonmodels.User) (bool, error) {
    members, err := c.projectMgr.ListAdminRolesOfUser(ctx, member)
    if err != nil {
        return false, err
    }

@ -98,7 +98,7 @@ func (suite *MemberControllerTestSuite) TestAddProjectMemberWithUserGroup() {

func (suite *MemberControllerTestSuite) TestIsProjectAdmin() {
    mock.OnAnything(suite.projectMgr, "ListAdminRolesOfUser").Return([]models.Member{{ID: 2, ProjectID: 2}}, nil)
    ok, err := suite.controller.IsProjectAdmin(context.Background(), 2)
    ok, err := suite.controller.IsProjectAdmin(context.Background(), comModels.User{UserID: 1})
    suite.NoError(err)
    suite.True(ok)
}
@ -172,9 +172,6 @@ func (c *controller) UseLocalManifest(ctx context.Context, art lib.ArtifactInfo,
    return false, nil, err
}
if !exist || desc == nil {
    go func() {
        c.local.DeleteManifest(remoteRepo, art.Tag)
    }()
    return false, nil, errors.NotFoundError(fmt.Errorf("repo %v, tag %v not found", art.Repository, art.Tag))
}

@ -220,11 +217,6 @@ func (c *controller) ProxyManifest(ctx context.Context, art lib.ArtifactInfo, re
    ref := getReference(art)
    man, dig, err := remote.Manifest(remoteRepo, ref)
    if err != nil {
        if errors.IsNotFoundErr(err) {
            go func() {
                c.local.DeleteManifest(remoteRepo, art.Tag)
            }()
        }
        return man, err
    }
    ct, _, err := man.Payload()
@ -154,11 +154,8 @@ func (c *controller) Start(ctx context.Context, policy *replicationmodel.Policy,
func (c *controller) markError(ctx context.Context, executionID int64, err error) {
    logger := log.GetLogger(ctx)
    // try to stop the execution first in case some tasks are already created
    if err := c.execMgr.StopAndWait(ctx, executionID, 10*time.Second); err != nil {
        logger.Errorf("failed to stop the execution %d: %v", executionID, err)
    }
    if err := c.execMgr.MarkError(ctx, executionID, err.Error()); err != nil {
        logger.Errorf("failed to mark error for the execution %d: %v", executionID, err)
    if e := c.execMgr.StopAndWaitWithError(ctx, executionID, 10*time.Second, err); e != nil {
        logger.Errorf("failed to stop the execution %d: %v", executionID, e)
    }
}

@ -75,8 +75,7 @@ func (r *replicationTestSuite) TestStart() {
    // got an error when running the replication flow
    r.execMgr.On("Create", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(int64(1), nil)
    r.execMgr.On("Get", mock.Anything, mock.Anything).Return(&task.Execution{}, nil)
    r.execMgr.On("StopAndWait", mock.Anything, mock.Anything, mock.Anything).Return(nil)
    r.execMgr.On("MarkError", mock.Anything, mock.Anything, mock.Anything).Return(nil)
    r.execMgr.On("StopAndWaitWithError", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil)
    r.flowCtl.On("Start", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("error"))
    r.ormCreator.On("Create").Return(nil)
    id, err = r.ctl.Start(context.Background(), &repctlmodel.Policy{Enabled: true}, nil, task.ExecutionTriggerManual)
@ -24,6 +24,3 @@ type registryAdapter interface {
    adapter.Adapter
    adapter.ArtifactRegistry
}

//go:generate mockery --dir . --name registryAdapter --output . --outpkg flow --filename mock_adapter_test.go --structname mockAdapter
//go:generate mockery --dir ../../../pkg/reg/adapter --name Factory --output . --outpkg flow --filename mock_adapter_factory_test.go --structname mockFactory
@ -1,4 +1,4 @@
// Code generated by mockery v2.35.4. DO NOT EDIT.
// Code generated by mockery v2.43.2. DO NOT EDIT.

package flow

@ -18,6 +18,10 @@ type mockFactory struct {
func (_m *mockFactory) AdapterPattern() *model.AdapterPattern {
    ret := _m.Called()

    if len(ret) == 0 {
        panic("no return value specified for AdapterPattern")
    }

    var r0 *model.AdapterPattern
    if rf, ok := ret.Get(0).(func() *model.AdapterPattern); ok {
        r0 = rf()
@ -34,6 +38,10 @@ func (_m *mockFactory) AdapterPattern() *model.AdapterPattern {
func (_m *mockFactory) Create(_a0 *model.Registry) (adapter.Adapter, error) {
    ret := _m.Called(_a0)

    if len(ret) == 0 {
        panic("no return value specified for Create")
    }

    var r0 adapter.Adapter
    var r1 error
    if rf, ok := ret.Get(0).(func(*model.Registry) (adapter.Adapter, error)); ok {
@ -1,4 +1,4 @@
// Code generated by mockery v2.35.4. DO NOT EDIT.
// Code generated by mockery v2.43.2. DO NOT EDIT.

package flow

@ -21,6 +21,10 @@ type mockAdapter struct {
func (_m *mockAdapter) BlobExist(repository string, digest string) (bool, error) {
    ret := _m.Called(repository, digest)

    if len(ret) == 0 {
        panic("no return value specified for BlobExist")
    }

    var r0 bool
    var r1 error
    if rf, ok := ret.Get(0).(func(string, string) (bool, error)); ok {
@ -45,6 +49,10 @@ func (_m *mockAdapter) BlobExist(repository string, digest string) (bool, error)
func (_m *mockAdapter) CanBeMount(digest string) (bool, string, error) {
    ret := _m.Called(digest)

    if len(ret) == 0 {
        panic("no return value specified for CanBeMount")
    }

    var r0 bool
    var r1 string
    var r2 error
@ -76,6 +84,10 @@ func (_m *mockAdapter) CanBeMount(digest string) (bool, string, error) {
func (_m *mockAdapter) DeleteManifest(repository string, reference string) error {
    ret := _m.Called(repository, reference)

    if len(ret) == 0 {
        panic("no return value specified for DeleteManifest")
    }

    var r0 error
    if rf, ok := ret.Get(0).(func(string, string) error); ok {
        r0 = rf(repository, reference)
@ -90,6 +102,10 @@ func (_m *mockAdapter) DeleteManifest(repository string, reference string) error
func (_m *mockAdapter) DeleteTag(repository string, tag string) error {
    ret := _m.Called(repository, tag)

    if len(ret) == 0 {
        panic("no return value specified for DeleteTag")
    }

    var r0 error
    if rf, ok := ret.Get(0).(func(string, string) error); ok {
        r0 = rf(repository, tag)
@ -104,6 +120,10 @@ func (_m *mockAdapter) DeleteTag(repository string, tag string) error {
func (_m *mockAdapter) FetchArtifacts(filters []*model.Filter) ([]*model.Resource, error) {
    ret := _m.Called(filters)

    if len(ret) == 0 {
        panic("no return value specified for FetchArtifacts")
    }

    var r0 []*model.Resource
    var r1 error
    if rf, ok := ret.Get(0).(func([]*model.Filter) ([]*model.Resource, error)); ok {
@ -130,6 +150,10 @@ func (_m *mockAdapter) FetchArtifacts(filters []*model.Filter) ([]*model.Resourc
func (_m *mockAdapter) HealthCheck() (string, error) {
    ret := _m.Called()

    if len(ret) == 0 {
        panic("no return value specified for HealthCheck")
    }

    var r0 string
    var r1 error
    if rf, ok := ret.Get(0).(func() (string, error)); ok {
@ -154,6 +178,10 @@ func (_m *mockAdapter) HealthCheck() (string, error) {
func (_m *mockAdapter) Info() (*model.RegistryInfo, error) {
    ret := _m.Called()

    if len(ret) == 0 {
        panic("no return value specified for Info")
    }

    var r0 *model.RegistryInfo
    var r1 error
    if rf, ok := ret.Get(0).(func() (*model.RegistryInfo, error)); ok {
@ -180,6 +208,10 @@ func (_m *mockAdapter) Info() (*model.RegistryInfo, error) {
func (_m *mockAdapter) ListTags(repository string) ([]string, error) {
    ret := _m.Called(repository)

    if len(ret) == 0 {
        panic("no return value specified for ListTags")
    }

    var r0 []string
    var r1 error
    if rf, ok := ret.Get(0).(func(string) ([]string, error)); ok {
@ -206,6 +238,10 @@ func (_m *mockAdapter) ListTags(repository string) ([]string, error) {
func (_m *mockAdapter) ManifestExist(repository string, reference string) (bool, *distribution.Descriptor, error) {
    ret := _m.Called(repository, reference)

    if len(ret) == 0 {
        panic("no return value specified for ManifestExist")
    }

    var r0 bool
    var r1 *distribution.Descriptor
    var r2 error
@ -239,6 +275,10 @@ func (_m *mockAdapter) ManifestExist(repository string, reference string) (bool,
func (_m *mockAdapter) MountBlob(srcRepository string, digest string, dstRepository string) error {
    ret := _m.Called(srcRepository, digest, dstRepository)

    if len(ret) == 0 {
        panic("no return value specified for MountBlob")
    }

    var r0 error
    if rf, ok := ret.Get(0).(func(string, string, string) error); ok {
        r0 = rf(srcRepository, digest, dstRepository)
@ -253,6 +293,10 @@ func (_m *mockAdapter) MountBlob(srcRepository string, digest string, dstReposit
func (_m *mockAdapter) PrepareForPush(_a0 []*model.Resource) error {
    ret := _m.Called(_a0)

    if len(ret) == 0 {
        panic("no return value specified for PrepareForPush")
    }

    var r0 error
    if rf, ok := ret.Get(0).(func([]*model.Resource) error); ok {
        r0 = rf(_a0)
@ -267,6 +311,10 @@ func (_m *mockAdapter) PrepareForPush(_a0 []*model.Resource) error {
func (_m *mockAdapter) PullBlob(repository string, digest string) (int64, io.ReadCloser, error) {
    ret := _m.Called(repository, digest)

    if len(ret) == 0 {
        panic("no return value specified for PullBlob")
    }

    var r0 int64
    var r1 io.ReadCloser
    var r2 error
@ -300,6 +348,10 @@ func (_m *mockAdapter) PullBlob(repository string, digest string) (int64, io.Rea
func (_m *mockAdapter) PullBlobChunk(repository string, digest string, blobSize int64, start int64, end int64) (int64, io.ReadCloser, error) {
    ret := _m.Called(repository, digest, blobSize, start, end)

    if len(ret) == 0 {
        panic("no return value specified for PullBlobChunk")
    }

    var r0 int64
    var r1 io.ReadCloser
    var r2 error
@ -340,6 +392,10 @@ func (_m *mockAdapter) PullManifest(repository string, reference string, acceptt
    _ca = append(_ca, _va...)
    ret := _m.Called(_ca...)

    if len(ret) == 0 {
        panic("no return value specified for PullManifest")
    }

    var r0 distribution.Manifest
    var r1 string
    var r2 error
@ -373,6 +429,10 @@ func (_m *mockAdapter) PullManifest(repository string, reference string, acceptt
func (_m *mockAdapter) PushBlob(repository string, digest string, size int64, blob io.Reader) error {
    ret := _m.Called(repository, digest, size, blob)

    if len(ret) == 0 {
        panic("no return value specified for PushBlob")
    }

    var r0 error
    if rf, ok := ret.Get(0).(func(string, string, int64, io.Reader) error); ok {
        r0 = rf(repository, digest, size, blob)
@ -387,6 +447,10 @@ func (_m *mockAdapter) PushBlobChunk(repository string, digest string, size int6
func (_m *mockAdapter) PushBlobChunk(repository string, digest string, size int64, chunk io.Reader, start int64, end int64, location string) (string, int64, error) {
    ret := _m.Called(repository, digest, size, chunk, start, end, location)

    if len(ret) == 0 {
        panic("no return value specified for PushBlobChunk")
    }

    var r0 string
    var r1 int64
    var r2 error
@ -418,6 +482,10 @@ func (_m *mockAdapter) PushBlobChunk(repository string, digest string, size int6
func (_m *mockAdapter) PushManifest(repository string, reference string, mediaType string, payload []byte) (string, error) {
    ret := _m.Called(repository, reference, mediaType, payload)

    if len(ret) == 0 {
        panic("no return value specified for PushManifest")
    }

    var r0 string
    var r1 error
    if rf, ok := ret.Get(0).(func(string, string, string, []byte) (string, error)); ok {
@ -1,17 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package replication

//go:generate mockery --dir ./flow --name Controller --output . --outpkg replication --filename mock_flow_controller_test.go --structname flowController
@ -1,4 +1,4 @@
// Code generated by mockery v2.35.4. DO NOT EDIT.
// Code generated by mockery v2.43.2. DO NOT EDIT.

package replication

@ -21,6 +21,10 @@ type flowController struct {
func (_m *flowController) Start(ctx context.Context, executionID int64, policy *model.Policy, resource *regmodel.Resource) error {
    ret := _m.Called(ctx, executionID, policy, resource)

    if len(ret) == 0 {
        panic("no return value specified for Start")
    }

    var r0 error
    if rf, ok := ret.Get(0).(func(context.Context, int64, *model.Policy, *regmodel.Resource) error); ok {
        r0 = rf(ctx, executionID, policy, resource)
@ -280,12 +280,8 @@ func (r *defaultController) TriggerRetentionExec(ctx context.Context, policyID i
    if num, err := r.launcher.Launch(ctx, p, id, dryRun); err != nil {
        logger.Errorf("failed to launch the retention jobs, err: %v", err)

        if err = r.execMgr.StopAndWait(ctx, id, 10*time.Second); err != nil {
            logger.Errorf("failed to stop the retention execution %d: %v", id, err)
        }

        if err = r.execMgr.MarkError(ctx, id, err.Error()); err != nil {
            logger.Errorf("failed to mark error for the retention execution %d: %v", id, err)
        if e := r.execMgr.StopAndWaitWithError(ctx, id, 10*time.Second, err); e != nil {
            logger.Errorf("failed to stop the retention execution %d: %v", id, e)
        }
    } else if num == 0 {
        // no candidates, mark the execution as done directly
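Both the replication and retention controllers now replace the StopAndWait-then-MarkError pair with a single StopAndWaitWithError call. A hedged sketch of the contract inferred from those two call sites: stop the execution, wait up to the timeout, then record the original error on the execution. This is an inference for illustration, not Harbor's exact implementation:

func (m *executionManager) StopAndWaitWithError(ctx context.Context, id int64, timeout time.Duration, origErr error) error {
    if err := m.StopAndWait(ctx, id, timeout); err != nil {
        return err
    }
    if origErr != nil {
        // persist the original failure so the execution shows why it stopped
        return m.MarkError(ctx, id, origErr.Error())
    }
    return nil
}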
@ -25,7 +25,6 @@ import (

    "github.com/google/uuid"

    "github.com/goharbor/harbor/src/common/rbac"
    ar "github.com/goharbor/harbor/src/controller/artifact"
    "github.com/goharbor/harbor/src/controller/event/operator"
    "github.com/goharbor/harbor/src/controller/robot"
@ -68,10 +67,11 @@ const (
    artfiactKey     = "artifact"
    registrationKey = "registration"

    artifactIDKey  = "artifact_id"
    artifactTagKey = "artifact_tag"
    reportUUIDsKey = "report_uuids"
    robotIDKey     = "robot_id"
    artifactIDKey       = "artifact_id"
    artifactTagKey      = "artifact_tag"
    reportUUIDsKey      = "report_uuids"
    robotIDKey          = "robot_id"
    enabledCapabilities = "enabled_capabilities"
)

// uuidGenerator is a func template for generating UUIDs.
@ -91,6 +91,7 @@ type launchScanJobParam struct {
    Artifact *ar.Artifact
    Tag      string
    Reports  []*scan.Report
    Type     string
}

// basicController is the default implementation of the api.Controller interface
@ -193,6 +194,18 @@ func (bc *basicController) collectScanningArtifacts(ctx context.Context, r *scan
    return nil
}

    // Many in-toto SBOM artifacts on Docker Hub get replicated to Harbor and are treated as image-type artifacts.
    // When scanning such an artifact, the scanner may assume the layer is a tgz-format image layer; reading it as a
    // tgz stream fails and closes the stream abruptly, which causes a panic in the harbor core log.
    // To avoid the panic, skip scanning in-toto SBOM artifacts.
    unscannable, err := bc.ar.HasUnscannableLayer(ctx, a.Digest)
    if err != nil {
        return err
    }
    if unscannable {
        return nil
    }

    supported := hasCapability(r, a)

    if !supported && a.IsImageIndex() {
@ -242,23 +255,27 @@ func (bc *basicController) Scan(ctx context.Context, artifact *ar.Artifact, opti
    if err != nil {
        return err
    }

    if !scannable {
        return errors.BadRequestError(nil).WithMessage("the configured scanner %s does not support scanning artifact with mime type %s", r.Name, artifact.ManifestMediaType)
    }

    // Parse options
    opts, err := parseOptions(options...)
    if err != nil {
        return errors.Wrap(err, "scan controller: scan")
    }

    if !scannable {
        if opts.FromEvent {
            // skip returning an error for event-related scans
            return nil
        }
        return errors.BadRequestError(nil).WithMessage("the configured scanner %s does not support scanning artifact with mime type %s", r.Name, artifact.ManifestMediaType)
    }

    var (
        errs                []error
        launchScanJobParams []*launchScanJobParam
    )
    handler := sca.GetScanHandler(opts.GetScanType())
    for _, art := range artifacts {
        reports, err := bc.makeReportPlaceholder(ctx, r, art)
        reports, err := handler.MakePlaceHolder(ctx, art, r)
        if err != nil {
            if errors.IsConflictErr(err) {
                errs = append(errs, err)
@ -287,6 +304,7 @@ func (bc *basicController) Scan(ctx context.Context, artifact *ar.Artifact, opti
                Artifact: art,
                Tag:      tag,
                Reports:  reports,
                Type:     opts.GetScanType(),
            })
        }
    }
@ -308,11 +326,18 @@ func (bc *basicController) Scan(ctx context.Context, artifact *ar.Artifact, opti
            "id":   r.ID,
            "name": r.Name,
        },
        enabledCapabilities: map[string]interface{}{
            "type": opts.GetScanType(),
        },
    }
    if op := operator.FromContext(ctx); op != "" {
        extraAttrs["operator"] = op
    }
    executionID, err := bc.execMgr.Create(ctx, job.ImageScanJobVendorType, artifact.ID, task.ExecutionTriggerManual, extraAttrs)
    vendorType := handler.JobVendorType()
    // Use different vendor types for vulnerability scans and SBOM generation:
    // the execution reaper only keeps the latest execution for the vendor type IMAGE_SCAN,
    // and both vulnerability and sbom need their latest scan execution kept to report the latest scan status.
    executionID, err := bc.execMgr.Create(ctx, vendorType, artifact.ID, task.ExecutionTriggerManual, extraAttrs)
    if err != nil {
        return err
    }
@ -324,7 +349,7 @@ func (bc *basicController) Scan(ctx context.Context, artifact *ar.Artifact, opti
    for _, launchScanJobParam := range launchScanJobParams {
        launchScanJobParam.ExecutionID = opts.ExecutionID

        if err := bc.launchScanJob(ctx, launchScanJobParam); err != nil {
        if err := bc.launchScanJob(ctx, launchScanJobParam, opts); err != nil {
            log.G(ctx).Warningf("scan artifact %s@%s failed, error: %v", artifact.RepositoryName, artifact.Digest, err)
            errs = append(errs, err)
        }
@ -339,15 +364,17 @@ func (bc *basicController) Scan(ctx context.Context, artifact *ar.Artifact, opti
}

// Stop the scan job of a given artifact
func (bc *basicController) Stop(ctx context.Context, artifact *ar.Artifact) error {
func (bc *basicController) Stop(ctx context.Context, artifact *ar.Artifact, capType string) error {
    if artifact == nil {
        return errors.New("nil artifact to stop scan")
    }
    query := q.New(q.KeyWords{"extra_attrs.artifact.digest": artifact.Digest})
    vendorType := sca.GetScanHandler(capType).JobVendorType()
    query := q.New(q.KeyWords{"vendor_type": vendorType, "extra_attrs.artifact.digest": artifact.Digest, "extra_attrs.enabled_capabilities.type": capType})
    executions, err := bc.execMgr.List(ctx, query)
    if err != nil {
        return err
    }

    if len(executions) == 0 {
        message := fmt.Sprintf("no scan job for artifact digest=%v", artifact.Digest)
        return errors.BadRequestError(nil).WithMessage(message)
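The rewritten Stop query narrows candidate executions by ANDing three keywords: the handler's vendor type, the artifact digest stored in the execution's extra attributes, and the capability type. Conceptually, with hypothetical values ("SBOM_SCAN" is an assumed vendor-type string, not confirmed by this diff):

query := q.New(q.KeyWords{
    "vendor_type":                            "SBOM_SCAN",   // from handler.JobVendorType(), value assumed
    "extra_attrs.artifact.digest":            "sha256:aaaa", // digest of the artifact being stopped
    "extra_attrs.enabled_capabilities.type":  "sbom",        // the capType argument
})
executions, err := bc.execMgr.List(ctx, query)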
@ -379,7 +406,9 @@ func (bc *basicController) ScanAll(ctx context.Context, trigger string, async bo
|
||||
}
|
||||
|
||||
err = bc.startScanAll(ctx, executionID)
|
||||
log.Errorf("failed to start scan all, executionID=%d, error: %v", executionID, err)
|
||||
if err != nil {
|
||||
log.Errorf("failed to start scan all, executionID=%d, error: %v", executionID, err)
|
||||
}
|
||||
}(bc.makeCtx())
|
||||
} else {
|
||||
if err := bc.startScanAll(ctx, executionID); err != nil {
|
||||
@ -541,61 +570,6 @@ func (bc *basicController) startScanAll(ctx context.Context, executionID int64)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bc *basicController) makeReportPlaceholder(ctx context.Context, r *scanner.Registration, art *ar.Artifact) ([]*scan.Report, error) {
|
||||
mimeTypes := r.GetProducesMimeTypes(art.ManifestMediaType)
|
||||
|
||||
oldReports, err := bc.manager.GetBy(bc.cloneCtx(ctx), art.Digest, r.UUID, mimeTypes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := bc.assembleReports(ctx, oldReports...); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(oldReports) > 0 {
|
||||
for _, oldReport := range oldReports {
|
||||
if !job.Status(oldReport.Status).Final() {
|
||||
return nil, errors.ConflictError(nil).WithMessage("a previous scan process is %s", oldReport.Status)
|
||||
}
|
||||
}
|
||||
|
||||
for _, oldReport := range oldReports {
|
||||
if err := bc.manager.Delete(ctx, oldReport.UUID); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var reports []*scan.Report
|
||||
|
||||
for _, pm := range r.GetProducesMimeTypes(art.ManifestMediaType) {
|
||||
report := &scan.Report{
|
||||
Digest: art.Digest,
|
||||
RegistrationUUID: r.UUID,
|
||||
MimeType: pm,
|
||||
}
|
||||
|
||||
create := func(ctx context.Context) error {
|
||||
reportUUID, err := bc.manager.Create(ctx, report)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
report.UUID = reportUUID
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := orm.WithTransaction(create)(orm.SetTransactionOpNameToContext(ctx, "tx-make-report-placeholder")); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
reports = append(reports, report)
|
||||
}
|
||||
|
||||
return reports, nil
|
||||
}
|
||||
|
||||
// GetReport ...
|
||||
func (bc *basicController) GetReport(ctx context.Context, artifact *ar.Artifact, mimeTypes []string) ([]*scan.Report, error) {
|
||||
if artifact == nil {
|
||||
@ -671,37 +645,9 @@ func (bc *basicController) GetReport(ctx context.Context, artifact *ar.Artifact,
|
||||
}
|
// GetSummary ...
func (bc *basicController) GetSummary(ctx context.Context, artifact *ar.Artifact, mimeTypes []string) (map[string]interface{}, error) {
if artifact == nil {
return nil, errors.New("no way to get report summaries for nil artifact")
}
// Get reports first
rps, err := bc.GetReport(ctx, artifact, mimeTypes)
if err != nil {
return nil, err
}
summaries := make(map[string]interface{}, len(rps))
for _, rp := range rps {
sum, err := report.GenerateSummary(rp)
if err != nil {
return nil, err
}
if s, ok := summaries[rp.MimeType]; ok {
r, err := report.MergeSummary(rp.MimeType, s, sum)
if err != nil {
return nil, err
}
summaries[rp.MimeType] = r
} else {
summaries[rp.MimeType] = sum
}
}
return summaries, nil
func (bc *basicController) GetSummary(ctx context.Context, artifact *ar.Artifact, scanType string, mimeTypes []string) (map[string]interface{}, error) {
handler := sca.GetScanHandler(scanType)
return handler.GetSummary(ctx, artifact, mimeTypes)
}
// GetScanLog ...
@ -737,7 +683,7 @@ func (bc *basicController) GetScanLog(ctx context.Context, artifact *ar.Artifact
if !scanTaskForArtifacts(t, artifactMap) {
return nil, errors.NotFoundError(nil).WithMessage("scan log with uuid: %s not found", uuid)
}
for _, reportUUID := range getReportUUIDs(t.ExtraAttrs) {
for _, reportUUID := range GetReportUUIDs(t.ExtraAttrs) {
reportUUIDToTasks[reportUUID] = t
}
}
@ -818,14 +764,6 @@ func scanTaskForArtifacts(task *task.Task, artifactMap map[int64]interface{}) bo
return exist
}
// DeleteReports ...
func (bc *basicController) DeleteReports(ctx context.Context, digests ...string) error {
if err := bc.manager.DeleteByDigests(ctx, digests...); err != nil {
return errors.Wrap(err, "scan controller: delete reports")
}
return nil
}
func (bc *basicController) GetVulnerable(ctx context.Context, artifact *ar.Artifact, allowlist allowlist.CVESet, allowlistIsExpired bool) (*Vulnerable, error) {
if artifact == nil {
return nil, errors.New("no way to get vulnerable for nil artifact")
@ -910,7 +848,7 @@ func (bc *basicController) GetVulnerable(ctx context.Context, artifact *ar.Artif
}
// makeRobotAccount creates a robot account based on the arguments for scanning.
func (bc *basicController) makeRobotAccount(ctx context.Context, projectID int64, repository string, registration *scanner.Registration) (*robot.Robot, error) {
func (bc *basicController) makeRobotAccount(ctx context.Context, projectID int64, repository string, registration *scanner.Registration, permission []*types.Policy) (*robot.Robot, error) {
// Use uuid as name to avoid duplicated entries.
UUID, err := bc.uuid()
if err != nil {
@ -932,16 +870,7 @@ func (bc *basicController) makeRobotAccount(ctx context.Context, projectID int64
{
Kind: "project",
Namespace: projectName,
Access: []*types.Policy{
{
Resource: rbac.ResourceRepository,
Action: rbac.ActionPull,
},
{
Resource: rbac.ResourceRepository,
Action: rbac.ActionScannerPull,
},
},
Access: permission,
},
},
}
@ -960,7 +889,7 @@ func (bc *basicController) makeRobotAccount(ctx context.Context, projectID int64
}
// launchScanJob launches a job to run scan
func (bc *basicController) launchScanJob(ctx context.Context, param *launchScanJobParam) error {
func (bc *basicController) launchScanJob(ctx context.Context, param *launchScanJobParam, opts *Options) error {
// don't launch a scan job for an artifact that is not supported by the scanner
if !hasCapability(param.Registration, param.Artifact) {
return nil
@ -978,7 +907,12 @@ func (bc *basicController) launchScanJob(ctx context.Context, param *launchScanJ
return errors.Wrap(err, "scan controller: launch scan job")
}
robot, err := bc.makeRobotAccount(ctx, param.Artifact.ProjectID, param.Artifact.RepositoryName, param.Registration)
// Get the scan handler by scan type to separate the scan logic for different scan types
handler := sca.GetScanHandler(param.Type)
if handler == nil {
return fmt.Errorf("failed to get scan handler, type is %v", param.Type)
}
robot, err := bc.makeRobotAccount(ctx, param.Artifact.ProjectID, param.Artifact.RepositoryName, param.Registration, handler.RequiredPermissions())
if err != nil {
return errors.Wrap(err, "scan controller: launch scan job")
}
@ -994,6 +928,12 @@ func (bc *basicController) launchScanJob(ctx context.Context, param *launchScanJ
Digest: param.Artifact.Digest,
Tag: param.Tag,
MimeType: param.Artifact.ManifestMediaType,
Size: param.Artifact.Size,
},
RequestType: []*v1.ScanType{
{
Type: opts.GetScanType(),
},
},
}
@ -1025,7 +965,8 @@ func (bc *basicController) launchScanJob(ctx context.Context, param *launchScanJ
params[sca.JobParameterRequest] = sJSON
params[sca.JobParameterMimes] = mimes
params[sca.JobParameterRobot] = robotJSON
// Because there is only one task type implementation,
// both the vulnerability scan and SBOM generation use the same job type for now.
j := &task.Job{
Name: job.ImageScanJobVendorType,
Metadata: &job.Metadata{
@ -1118,7 +1059,7 @@ func (bc *basicController) assembleReports(ctx context.Context, reports ...*scan
reportUUIDToTasks := map[string]*task.Task{}
for _, task := range tasks {
for _, reportUUID := range getReportUUIDs(task.ExtraAttrs) {
for _, reportUUID := range GetReportUUIDs(task.ExtraAttrs) {
reportUUIDToTasks[reportUUID] = task
}
}
@ -1189,7 +1130,8 @@ func getArtifactTag(extraAttrs map[string]interface{}) string {
return tag
}
func getReportUUIDs(extraAttrs map[string]interface{}) []string {
// GetReportUUIDs returns the report UUIDs from the extra attributes
func GetReportUUIDs(extraAttrs map[string]interface{}) []string {
var reportUUIDs []string
if extraAttrs != nil {
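The new controller code resolves a per-scan-type handler through `sca.GetScanHandler` and asks it for `RequiredPermissions()` before creating the robot account. Below is a minimal, self-contained sketch of such a handler registry; the names are illustrative only and are not Harbor's actual `pkg/scan` API.

```go
package main

import "fmt"

// Handler abstracts the per-scan-type behavior (a simplified stand-in for
// Harbor's scan handler interface; names here are illustrative).
type Handler interface {
	JobVendorType() string
}

var registry = map[string]Handler{}

// Register binds a handler to a scan type (e.g. "vulnerability" or "sbom").
func Register(scanType string, h Handler) {
	registry[scanType] = h
}

// Get returns the handler for the scan type, or nil when none is registered;
// callers must nil-check, as launchScanJob does above.
func Get(scanType string) Handler {
	return registry[scanType]
}

type vulnHandler struct{}

func (vulnHandler) JobVendorType() string { return "IMAGE_SCAN" }

func main() {
	Register("vulnerability", vulnHandler{})
	if h := Get("vulnerability"); h != nil {
		fmt.Println(h.JobVendorType())
	}
}
```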
@ -54,6 +54,7 @@ import (
ormtesting "github.com/goharbor/harbor/src/testing/lib/orm"
"github.com/goharbor/harbor/src/testing/mock"
accessorytesting "github.com/goharbor/harbor/src/testing/pkg/accessory"
scanTest "github.com/goharbor/harbor/src/testing/pkg/scan"
postprocessorstesting "github.com/goharbor/harbor/src/testing/pkg/scan/postprocessors"
reporttesting "github.com/goharbor/harbor/src/testing/pkg/scan/report"
tasktesting "github.com/goharbor/harbor/src/testing/pkg/task"
@ -63,21 +64,24 @@ import (
type ControllerTestSuite struct {
suite.Suite
scanHandler *scanTest.Handler
artifactCtl *artifacttesting.Controller
accessoryMgr *accessorytesting.Manager
originalArtifactCtl artifact.Controller
tagCtl *tagtesting.FakeController
registration *scanner.Registration
artifact *artifact.Artifact
rawReport string
registration *scanner.Registration
artifact *artifact.Artifact
wrongArtifact *artifact.Artifact
rawReport string
execMgr *tasktesting.ExecutionManager
taskMgr *tasktesting.Manager
reportMgr *reporttesting.Manager
ar artifact.Controller
c Controller
c *basicController
reportConverter *postprocessorstesting.ScanReportV1ToV2Converter
cache *mockcache.Cache
}
@ -89,6 +93,8 @@ func TestController(t *testing.T) {
// SetupSuite ...
func (suite *ControllerTestSuite) SetupSuite() {
suite.scanHandler = &scanTest.Handler{}
sca.RegisterScanHanlder(v1.ScanTypeVulnerability, suite.scanHandler)
suite.originalArtifactCtl = artifact.Ctl
suite.artifactCtl = &artifacttesting.Controller{}
artifact.Ctl = suite.artifactCtl
@ -100,6 +106,9 @@ func (suite *ControllerTestSuite) SetupSuite() {
suite.artifact.Digest = "digest-code"
suite.artifact.ManifestMediaType = v1.MimeTypeDockerArtifact
suite.wrongArtifact = &artifact.Artifact{Artifact: art.Artifact{ID: 2, ProjectID: 1}}
suite.wrongArtifact.Digest = "digest-wrong"
m := &v1.ScannerAdapterMetadata{
Scanner: &v1.Scanner{
Name: "Trivy",
@ -107,6 +116,7 @@ func (suite *ControllerTestSuite) SetupSuite() {
Version: "0.1.0",
},
Capabilities: []*v1.ScannerCapability{{
Type: v1.ScanTypeVulnerability,
ConsumesMimeTypes: []string{
v1.MimeTypeOCIArtifact,
v1.MimeTypeDockerArtifact,
@ -114,7 +124,17 @@ func (suite *ControllerTestSuite) SetupSuite() {
ProducesMimeTypes: []string{
v1.MimeTypeNativeReport,
},
}},
},
{
Type: v1.ScanTypeSbom,
ConsumesMimeTypes: []string{
v1.MimeTypeOCIArtifact,
},
ProducesMimeTypes: []string{
v1.MimeTypeSBOMReport,
},
},
},
Properties: v1.ScannerProperties{
"extra": "testing",
},
@ -179,9 +199,24 @@ func (suite *ControllerTestSuite) SetupSuite() {
},
}
sbomReport := []*scan.Report{
{
ID: 12,
UUID: "rp-uuid-002",
Digest: "digest-code",
RegistrationUUID: "uuid001",
MimeType: "application/vnd.scanner.adapter.sbom.report.harbor+json; version=1.0",
Status: "Success",
Report: `{"sbom_digest": "sha256:1234567890", "scan_status": "Success", "duration": 3, "start_time": "2021-09-01T00:00:00Z", "end_time": "2021-09-01T00:00:03Z"}`,
},
}
emptySBOMReport := []*scan.Report{{Report: ``, UUID: "rp-uuid-004"}}
mgr.On("GetBy", mock.Anything, suite.artifact.Digest, suite.registration.UUID, []string{v1.MimeTypeNativeReport}).Return(reports, nil)
mgr.On("GetBy", mock.Anything, suite.artifact.Digest, suite.registration.UUID, []string{v1.MimeTypeSBOMReport}).Return(sbomReport, nil)
mgr.On("GetBy", mock.Anything, suite.wrongArtifact.Digest, suite.registration.UUID, []string{v1.MimeTypeSBOMReport}).Return(emptySBOMReport, nil)
mgr.On("Get", mock.Anything, "rp-uuid-001").Return(reports[0], nil)
mgr.On("UpdateReportData", "rp-uuid-001", suite.rawReport, (int64)(10000)).Return(nil)
mgr.On("Update", "rp-uuid-001", suite.rawReport, (int64)(10000)).Return(nil)
mgr.On("UpdateStatus", "the-uuid-123", "Success", (int64)(10000)).Return(nil)
suite.reportMgr = mgr
@ -307,6 +342,8 @@ func (suite *ControllerTestSuite) SetupSuite() {
reportConverter: &postprocessorstesting.ScanReportV1ToV2Converter{},
cache: func() cache.Cache { return suite.cache },
}
mock.OnAnything(suite.scanHandler, "JobVendorType").Return("IMAGE_SCAN")
}
// TearDownSuite ...
@ -316,9 +353,23 @@ func (suite *ControllerTestSuite) TearDownSuite() {
// TestScanControllerScan ...
func (suite *ControllerTestSuite) TestScanControllerScan() {
rpts := []*scan.Report{
{UUID: "uuid"},
}
requiredPermission := []*types.Policy{
{
Resource: rbac.ResourceRepository,
Action: rbac.ActionPull,
},
{
Resource: rbac.ResourceRepository,
Action: rbac.ActionScannerPull,
},
}
{
// artifact not provided
suite.Require().Error(suite.c.Scan(context.TODO(), nil))
mock.OnAnything(suite.ar, "HasUnscannableLayer").Return(false, nil).Times(3)
}
{
@ -337,6 +388,8 @@ func (suite *ControllerTestSuite) TestScanControllerScan() {
mock.OnAnything(suite.execMgr, "Create").Return(int64(1), nil).Once()
mock.OnAnything(suite.taskMgr, "Create").Return(int64(1), nil).Once()
mock.OnAnything(suite.scanHandler, "MakePlaceHolder").Return(rpts, nil).Once()
mock.OnAnything(suite.scanHandler, "RequiredPermissions").Return(requiredPermission).Once()
ctx := orm.NewContext(context.TODO(), &ormtesting.FakeOrmer{})
@ -356,7 +409,10 @@ func (suite *ControllerTestSuite) TestScanControllerScan() {
}, nil).Once()
mock.OnAnything(suite.reportMgr, "Delete").Return(fmt.Errorf("delete failed")).Once()
mock.OnAnything(suite.scanHandler, "MakePlaceHolder").Return(rpts, nil).Once()
mock.OnAnything(suite.scanHandler, "RequiredPermissions").Return(requiredPermission).Once()
mock.OnAnything(suite.execMgr, "Create").Return(int64(1), nil).Once()
mock.OnAnything(suite.taskMgr, "Create").Return(int64(0), fmt.Errorf("failed to create task")).Once()
suite.Require().Error(suite.c.Scan(context.TODO(), suite.artifact))
}
@ -371,7 +427,9 @@ func (suite *ControllerTestSuite) TestScanControllerScan() {
mock.OnAnything(suite.taskMgr, "ListScanTasksByReportUUID").Return([]*task.Task{
{ExtraAttrs: suite.makeExtraAttrs(int64(1), "rp-uuid-001"), Status: "Running"},
}, nil).Once()
mock.OnAnything(suite.scanHandler, "MakePlaceHolder").Return(rpts, nil).Once()
mock.OnAnything(suite.scanHandler, "RequiredPermissions").Return(requiredPermission).Once()
mock.OnAnything(suite.execMgr, "Create").Return(int64(0), fmt.Errorf("failed to create execution")).Once()
suite.Require().Error(suite.c.Scan(context.TODO(), suite.artifact))
}
}
@ -380,7 +438,7 @@ func (suite *ControllerTestSuite) TestScanControllerScan() {
func (suite *ControllerTestSuite) TestScanControllerStop() {
{
// artifact not provided
suite.Require().Error(suite.c.Stop(context.TODO(), nil))
suite.Require().Error(suite.c.Stop(context.TODO(), nil, "vulnerability"))
}
{
@ -392,7 +450,7 @@ func (suite *ControllerTestSuite) TestScanControllerStop() {
ctx := orm.NewContext(nil, &ormtesting.FakeOrmer{})
suite.Require().NoError(suite.c.Stop(ctx, suite.artifact))
suite.Require().NoError(suite.c.Stop(ctx, suite.artifact, "vulnerability"))
}
{
@ -402,7 +460,7 @@ func (suite *ControllerTestSuite) TestScanControllerStop() {
ctx := orm.NewContext(nil, &ormtesting.FakeOrmer{})
suite.Require().Error(suite.c.Stop(ctx, suite.artifact))
suite.Require().Error(suite.c.Stop(ctx, suite.artifact, "vulnerability"))
}
{
@ -411,12 +469,13 @@ func (suite *ControllerTestSuite) TestScanControllerStop() {
ctx := orm.NewContext(nil, &ormtesting.FakeOrmer{})
suite.Require().Error(suite.c.Stop(ctx, suite.artifact))
suite.Require().Error(suite.c.Stop(ctx, suite.artifact, "vulnerability"))
}
}
// TestScanControllerGetReport ...
func (suite *ControllerTestSuite) TestScanControllerGetReport() {
mock.OnAnything(suite.ar, "HasUnscannableLayer").Return(false, nil).Once()
ctx := orm.NewContext(nil, &ormtesting.FakeOrmer{})
mock.OnAnything(suite.ar, "Walk").Return(nil).Run(func(args mock.Arguments) {
walkFn := args.Get(2).(func(*artifact.Artifact) error)
@ -432,23 +491,9 @@ func (suite *ControllerTestSuite) TestScanControllerGetReport() {
assert.Equal(suite.T(), 1, len(rep))
}
// TestScanControllerGetSummary ...
func (suite *ControllerTestSuite) TestScanControllerGetSummary() {
ctx := orm.NewContext(nil, &ormtesting.FakeOrmer{})
mock.OnAnything(suite.accessoryMgr, "List").Return([]accessoryModel.Accessory{}, nil).Once()
mock.OnAnything(suite.ar, "Walk").Return(nil).Run(func(args mock.Arguments) {
walkFn := args.Get(2).(func(*artifact.Artifact) error)
walkFn(suite.artifact)
}).Once()
mock.OnAnything(suite.taskMgr, "ListScanTasksByReportUUID").Return(nil, nil).Once()
sum, err := suite.c.GetSummary(ctx, suite.artifact, []string{v1.MimeTypeNativeReport})
require.NoError(suite.T(), err)
assert.Equal(suite.T(), 1, len(sum))
}
// TestScanControllerGetScanLog ...
func (suite *ControllerTestSuite) TestScanControllerGetScanLog() {
mock.OnAnything(suite.ar, "HasUnscannableLayer").Return(false, nil).Once()
ctx := orm.NewContext(nil, &ormtesting.FakeOrmer{})
mock.OnAnything(suite.taskMgr, "ListScanTasksByReportUUID").Return([]*task.Task{
{
@ -459,6 +504,13 @@ func (suite *ControllerTestSuite) TestScanControllerGetScanLog() {
mock.OnAnything(suite.taskMgr, "GetLog").Return([]byte("log"), nil).Once()
mock.OnAnything(suite.ar, "Walk").Return(nil).Run(func(args mock.Arguments) {
walkFn := args.Get(2).(func(*artifact.Artifact) error)
walkFn(suite.artifact)
}).Once()
mock.OnAnything(suite.accessoryMgr, "List").Return(nil, nil)
bytes, err := suite.c.GetScanLog(ctx, &artifact.Artifact{Artifact: art.Artifact{ID: 1, ProjectID: 1}}, "rp-uuid-001")
require.NoError(suite.T(), err)
assert.Condition(suite.T(), func() (success bool) {
@ -469,6 +521,7 @@ func (suite *ControllerTestSuite) TestScanControllerGetScanLog() {
func (suite *ControllerTestSuite) TestScanControllerGetMultiScanLog() {
ctx := orm.NewContext(nil, &ormtesting.FakeOrmer{})
mock.OnAnything(suite.ar, "HasUnscannableLayer").Return(false, nil).Times(4)
suite.taskMgr.On("ListScanTasksByReportUUID", ctx, "rp-uuid-001").Return([]*task.Task{
{
ID: 1,
@ -531,7 +584,22 @@ func (suite *ControllerTestSuite) TestScanAll() {
{
// no artifacts found when scan all
executionID := int64(1)
rpts := []*scan.Report{
{UUID: "uuid"},
}
requiredPermission := []*types.Policy{
{
Resource: rbac.ResourceRepository,
Action: rbac.ActionPull,
},
{
Resource: rbac.ResourceRepository,
Action: rbac.ActionScannerPull,
},
}
mock.OnAnything(suite.scanHandler, "MakePlaceHolder").Return(rpts, nil).Once()
mock.OnAnything(suite.scanHandler, "RequiredPermissions").Return(requiredPermission).Once()
mock.OnAnything(suite.ar, "HasUnscannableLayer").Return(false, nil).Once()
suite.execMgr.On(
"Create", mock.Anything, "SCAN_ALL", int64(0), "SCHEDULE",
mock.Anything).Return(executionID, nil).Once()
@ -572,8 +640,6 @@ func (suite *ControllerTestSuite) TestScanAll() {
walkFn(suite.artifact)
}).Once()
mock.OnAnything(suite.taskMgr, "ListScanTasksByReportUUID").Return(nil, nil).Once()
mock.OnAnything(suite.reportMgr, "Delete").Return(nil).Once()
mock.OnAnything(suite.reportMgr, "Create").Return("uuid", nil).Once()
mock.OnAnything(suite.taskMgr, "Create").Return(int64(0), fmt.Errorf("failed")).Once()
@ -600,16 +666,6 @@ func (suite *ControllerTestSuite) TestStopScanAll() {
suite.NoError(err)
}
func (suite *ControllerTestSuite) TestDeleteReports() {
suite.reportMgr.On("DeleteByDigests", context.TODO(), "digest").Return(nil).Once()
suite.NoError(suite.c.DeleteReports(context.TODO(), "digest"))
suite.reportMgr.On("DeleteByDigests", context.TODO(), "digest").Return(fmt.Errorf("failed")).Once()
suite.Error(suite.c.DeleteReports(context.TODO(), "digest"))
}
func (suite *ControllerTestSuite) makeExtraAttrs(artifactID int64, reportUUIDs ...string) map[string]interface{} {
b, _ := json.Marshal(map[string]interface{}{reportUUIDsKey: reportUUIDs})
@ -120,6 +120,13 @@ func scanTaskStatusChange(ctx context.Context, taskID int64, status string) (err
if operator, ok := exec.ExtraAttrs["operator"].(string); ok {
e.Operator = operator
}
// extract the ScanType if it exists in ExtraAttrs
if c, ok := exec.ExtraAttrs["enabled_capabilities"].(map[string]interface{}); ok {
if Type, ok := c["type"].(string); ok {
e.ScanType = Type
}
}
// fire event
notification.AddEvent(ctx, e)
}
@ -86,6 +86,18 @@ func (c *checker) IsScannable(ctx context.Context, art *artifact.Artifact) (bool
return artifact.ErrBreak
}
// Because many in-toto SBOM artifacts on Docker Hub are replicated into Harbor, they are treated as image-type artifacts.
// When scanning such an SBOM artifact, the scanner may assume the layer is an image layer in tgz format; reading the layer
// as a tgz stream then fails and closes the stream abruptly, causing a panic in the Harbor core log.
// To avoid the panic, skip scanning in-toto SBOM artifacts.
unscannable, err := c.artifactCtl.HasUnscannableLayer(ctx, a.Digest)
if err != nil {
return err
}
if unscannable {
return nil
}
return nil
}
@ -81,7 +81,7 @@ func (suite *CheckerTestSuite) TestIsScannable() {
walkFn := args.Get(2).(func(*artifact.Artifact) error)
walkFn(art)
})
mock.OnAnything(c.artifactCtl, "HasUnscannableLayer").Return(false, nil).Once()
isScannable, err := c.IsScannable(context.TODO(), art)
suite.Nil(err)
suite.False(isScannable)
@ -97,6 +97,7 @@ func (suite *CheckerTestSuite) TestIsScannable() {
walkFn := args.Get(2).(func(*artifact.Artifact) error)
walkFn(art)
})
mock.OnAnything(c.artifactCtl, "HasUnscannableLayer").Return(false, nil).Once()
isScannable, err := c.IsScannable(context.TODO(), art)
suite.Nil(err)
@ -55,10 +55,11 @@ type Controller interface {
// Arguments:
// ctx context.Context : the context for this method
// artifact *artifact.Artifact : the artifact whose scan job is to be stopped
// capType string : the capability type of the scanner, vulnerability or SBOM.
//
// Returns:
// error : non nil error if any errors occurred
Stop(ctx context.Context, artifact *artifact.Artifact) error
Stop(ctx context.Context, artifact *artifact.Artifact, capType string) error
// GetReport gets the reports for the given artifact identified by the digest
//
@ -82,7 +83,7 @@ type Controller interface {
// Returns:
// map[string]interface{} : report summaries indexed by mime types
// error : non nil error if any errors occurred
GetSummary(ctx context.Context, artifact *artifact.Artifact, mimeTypes []string) (map[string]interface{}, error)
GetSummary(ctx context.Context, artifact *artifact.Artifact, scanType string, mimeTypes []string) (map[string]interface{}, error)
// Get the scan log for the specified artifact with the given digest
//
@ -95,15 +96,6 @@ type Controller interface {
// error : non nil error if any errors occurred
GetScanLog(ctx context.Context, art *artifact.Artifact, uuid string) ([]byte, error)
// Delete the reports related with the specified digests
//
// Arguments:
// digests ...string : specify one or more digests whose reports will be deleted
//
// Returns:
// error : non nil error if any errors occurred
DeleteReports(ctx context.Context, digests ...string) error
// Scan all the artifacts
//
// Arguments:
@ -14,10 +14,22 @@
package scan
import v1 "github.com/goharbor/harbor/src/pkg/scan/rest/v1"
// Options keeps the settings/configurations for scanning.
type Options struct {
ExecutionID int64 // The execution ID under which the artifact is scanned
Tag string // The tag of the artifact to scan
ScanType string // The scan type, either sbom or vulnerability
FromEvent bool // Indicates whether the current call originates from an event
}
// GetScanType returns the scan type. For backward compatibility, the default type is vulnerability.
func (o *Options) GetScanType() string {
if len(o.ScanType) == 0 {
o.ScanType = v1.ScanTypeVulnerability
}
return o.ScanType
}
// Option represents an option item by func template.
@ -44,3 +56,19 @@ func WithTag(tag string) Option {
return nil
}
}
// WithScanType sets the scan type.
func WithScanType(scanType string) Option {
return func(options *Options) error {
options.ScanType = scanType
return nil
}
}
// WithFromEvent records whether the call originates from an event.
func WithFromEvent(fromEvent bool) Option {
return func(options *Options) error {
options.FromEvent = fromEvent
return nil
}
}
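The functional-options pattern above lets callers compose scan settings without a long parameter list. The following runnable sketch shows how `Options`, `Option`, and the `GetScanType` default interact; the types are trimmed to this file's fields and the `v1.ScanTypeVulnerability` constant is replaced by a string literal.

```go
package main

import "fmt"

type Options struct {
	ExecutionID int64
	Tag         string
	ScanType    string
	FromEvent   bool
}

// Option mutates Options, mirroring the func-template pattern above.
type Option func(*Options) error

func WithScanType(t string) Option {
	return func(o *Options) error { o.ScanType = t; return nil }
}

// GetScanType falls back to "vulnerability" for backward compatibility.
func (o *Options) GetScanType() string {
	if o.ScanType == "" {
		o.ScanType = "vulnerability" // stands in for v1.ScanTypeVulnerability
	}
	return o.ScanType
}

func main() {
	opts := &Options{}
	for _, apply := range []Option{WithScanType("sbom")} {
		_ = apply(opts)
	}
	fmt.Println(opts.GetScanType()) // "sbom"; prints "vulnerability" when unset
}
```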
@ -37,6 +37,8 @@ const (
proScannerMetaKey = "projectScanner"
statusUnhealthy = "unhealthy"
statusHealthy = "healthy"
// RetrieveCapFailMsg is the message logged when retrieving the scanner capabilities fails
RetrieveCapFailMsg = "failed to retrieve scanner capabilities, error %v"
)
// DefaultController is a singleton api controller for pluggable scanners
@ -79,7 +81,12 @@ func (bc *basicController) ListRegistrations(ctx context.Context, query *q.Query
if err != nil {
return nil, errors.Wrap(err, "api controller: list registrations")
}
for _, r := range l {
if err := bc.RetrieveCap(ctx, r); err != nil {
log.Warningf(RetrieveCapFailMsg, err)
return l, nil
}
}
return l, nil
}
@ -122,10 +129,26 @@ func (bc *basicController) GetRegistration(ctx context.Context, registrationUUID
if err != nil {
return nil, errors.Wrap(err, "api controller: get registration")
}
if r == nil {
return nil, nil
}
if err := bc.RetrieveCap(ctx, r); err != nil {
log.Warningf(RetrieveCapFailMsg, err)
return r, nil
}
return r, nil
}
func (bc *basicController) RetrieveCap(ctx context.Context, r *scanner.Registration) error {
mt, err := bc.Ping(ctx, r)
if err != nil {
logger.Errorf("Get registration error: %s", err)
return err
}
r.Capabilities = mt.ConvertCapability()
return nil
}
// RegistrationExists ...
func (bc *basicController) RegistrationExists(ctx context.Context, registrationUUID string) bool {
registration, err := bc.manager.Get(ctx, registrationUUID)
@ -113,7 +113,7 @@ type Controller interface {
// Arguments:
// ctx context.Context : the context.Context for this method
// projectID int64 : the ID of the given project
// scannerID string : the UUID of the the scanner
// scannerID string : the UUID of the scanner
//
// Returns:
// error : non nil error if any errors occurred
@ -154,4 +154,7 @@ type Controller interface {
// *v1.ScannerAdapterMetadata : metadata returned by the scanner if the ping succeeds
// error : non nil error if any errors occurred
GetMetadata(ctx context.Context, registrationUUID string) (*v1.ScannerAdapterMetadata, error)
// RetrieveCap retrieves the scanner capabilities
RetrieveCap(ctx context.Context, r *scanner.Registration) error
}
@ -119,11 +119,8 @@ func (c *controller) createCleanupTask(ctx context.Context, jobParams job.Parame
func (c *controller) markError(ctx context.Context, executionID int64, err error) {
// try to stop the execution first in case that some tasks are already created
if err := c.execMgr.StopAndWait(ctx, executionID, 10*time.Second); err != nil {
log.Errorf("failed to stop the execution %d: %v", executionID, err)
}
if err := c.execMgr.MarkError(ctx, executionID, err.Error()); err != nil {
log.Errorf("failed to mark error for the execution %d: %v", executionID, err)
if e := c.execMgr.StopAndWaitWithError(ctx, executionID, 10*time.Second, err); e != nil {
log.Errorf("failed to stop the execution %d: %v", executionID, e)
}
}
@ -117,7 +117,7 @@ func (suite *SystemArtifactCleanupTestSuite) TestStartCleanupErrorDuringTaskCrea
suite.taskMgr.On("Create", ctx, executionID, mock.Anything).Return(taskId, errors.New("test error")).Once()
suite.execMgr.On("MarkError", ctx, executionID, mock.Anything).Return(nil).Once()
suite.execMgr.On("StopAndWait", ctx, executionID, mock.Anything).Return(nil).Once()
suite.execMgr.On("StopAndWaitWithError", ctx, executionID, mock.Anything, mock.Anything).Return(nil).Once()
err := suite.ctl.Start(ctx, false, "SCHEDULE")
suite.Error(err)
|
||||
systemVendorID = -1
|
||||
|
||||
cronTypeCustom = "Custom"
|
||||
// run for every hour
|
||||
cronSpec = "0 0 * * * *"
|
||||
// run for every day
|
||||
cronSpec = "0 0 0 * * *"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
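Both specs have six fields, so the leading field is seconds: the cleanup moves from hourly ("0 0 * * * *") to daily at midnight ("0 0 0 * * *"). Assuming the schedule is driven by robfig/cron v3 (which the go.mod below pins), a parser configured with seconds support would interpret the new spec like this minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/robfig/cron/v3"
)

func main() {
	// The specs above have six fields (with seconds), so the parser must be
	// configured accordingly; plain cron.New() expects five-field specs.
	c := cron.New(cron.WithSeconds())
	// "0 0 0 * * *" fires at 00:00:00 every day (the new daily schedule).
	if _, err := c.AddFunc("0 0 0 * * *", func() { fmt.Println("cleanup tick") }); err != nil {
		panic(err)
	}
	c.Start()
	select {} // block forever in this demo
}
```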
@ -63,7 +63,13 @@ func (oc *OIDCController) RedirectLogin() {
oc.SendInternalServerError(err)
return
}
if err := oc.SetSession(redirectURLKey, oc.Ctx.Request.URL.Query().Get("redirect_url")); err != nil {
redirectURL := oc.Ctx.Request.URL.Query().Get("redirect_url")
if !utils.IsLocalPath(redirectURL) {
log.Errorf("invalid redirect url: %v", redirectURL)
oc.SendBadRequestError(fmt.Errorf("cannot redirect to other site"))
return
}
if err := oc.SetSession(redirectURLKey, redirectURL); err != nil {
log.Errorf("failed to set session for key: %s, error: %v", redirectURLKey, err)
oc.SendInternalServerError(err)
return
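This fix validates `redirect_url` before storing it in the session, closing an open-redirect hole. The sketch below is not Harbor's actual `utils.IsLocalPath` implementation, just an illustrative check with the same intent: accept only same-site absolute paths, rejecting scheme-relative ("//evil.com") and absolute URLs.

```go
package main

import (
	"fmt"
	"strings"
)

// isLocalPath is a hypothetical stand-in for the validation: empty means
// "no redirect", otherwise the path must start with a single "/" and must
// not be scheme-relative ("//host") or use a backslash trick ("/\host").
func isLocalPath(p string) bool {
	return p == "" ||
		(strings.HasPrefix(p, "/") &&
			!strings.HasPrefix(p, "//") &&
			!strings.HasPrefix(p, "/\\"))
}

func main() {
	for _, u := range []string{"/harbor/projects", "//evil.com", "https://evil.com"} {
		fmt.Printf("%-20s local=%v\n", u, isLocalPath(u))
	}
}
```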
@ -60,6 +60,7 @@ import (
_ "github.com/goharbor/harbor/src/pkg/accessory/model/cosign"
_ "github.com/goharbor/harbor/src/pkg/accessory/model/notation"
_ "github.com/goharbor/harbor/src/pkg/accessory/model/nydus"
_ "github.com/goharbor/harbor/src/pkg/accessory/model/sbom"
_ "github.com/goharbor/harbor/src/pkg/accessory/model/subject"
"github.com/goharbor/harbor/src/pkg/audit"
dbCfg "github.com/goharbor/harbor/src/pkg/config/db"
@ -69,6 +70,8 @@ import (
"github.com/goharbor/harbor/src/pkg/oidc"
"github.com/goharbor/harbor/src/pkg/scan"
"github.com/goharbor/harbor/src/pkg/scan/dao/scanner"
_ "github.com/goharbor/harbor/src/pkg/scan/sbom"
_ "github.com/goharbor/harbor/src/pkg/scan/vulnerability"
pkguser "github.com/goharbor/harbor/src/pkg/user"
"github.com/goharbor/harbor/src/pkg/version"
"github.com/goharbor/harbor/src/server"
@ -102,14 +105,14 @@ func gracefulShutdown(closing, done chan struct{}, shutdowns ...func()) {
signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
log.Infof("capture system signal %s, to close \"closing\" channel", <-signals)
close(closing)
shutdownChan := make(chan struct{}, 1)
shutdownChan := make(chan struct{})
go func() {
defer close(shutdownChan)
for _, s := range shutdowns {
s()
}
<-done
log.Infof("Goroutines exited normally")
shutdownChan <- struct{}{}
}()
select {
case <-shutdownChan:
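The change swaps a buffered channel plus an explicit send for an unbuffered channel closed in a `defer`: closing in a defer signals the waiter even if a shutdown hook panics or the goroutine returns early, whereas a trailing `ch <- struct{}{}` would be skipped in those cases. A runnable sketch of the pattern:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	done := make(chan struct{})
	shutdownChan := make(chan struct{}) // unbuffered; closed, never sent on

	go func() {
		// defer guarantees delivery of the completion signal on every
		// exit path of this goroutine, including panics in the hooks.
		defer close(shutdownChan)
		time.Sleep(10 * time.Millisecond) // stands in for running shutdown hooks
		<-done
	}()

	close(done)
	select {
	case <-shutdownChan:
		fmt.Println("all shutdown hooks finished")
	case <-time.After(time.Second):
		fmt.Println("timed out waiting for shutdown")
	}
}
```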
@ -22,7 +22,7 @@ import (
"github.com/docker/distribution/registry/auth/token"
"github.com/docker/libtrust"
"github.com/golang-jwt/jwt/v4"
"github.com/golang-jwt/jwt/v5"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/security"
@ -27,7 +27,7 @@ import (
"testing"
"github.com/docker/distribution/registry/auth/token"
jwt "github.com/golang-jwt/jwt/v4"
jwt "github.com/golang-jwt/jwt/v5"
"github.com/stretchr/testify/assert"
"github.com/goharbor/harbor/src/common/rbac"
182
src/go.mod
@ -1,85 +1,85 @@
module github.com/goharbor/harbor/src
go 1.21
go 1.22.3
require (
github.com/FZambia/sentinel v1.1.0
github.com/Masterminds/semver v1.5.0
github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190726115642-cd293c93fd97
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2
github.com/aws/aws-sdk-go v1.34.28
github.com/beego/beego/v2 v2.0.6
github.com/aws/aws-sdk-go v1.53.14
github.com/beego/beego/v2 v2.2.1
github.com/beego/i18n v0.0.0-20140604031826-e87155e8f0c0
github.com/bmatcuk/doublestar v1.3.4
github.com/casbin/casbin v1.9.1
github.com/cenkalti/backoff/v4 v4.2.1
github.com/cloudevents/sdk-go/v2 v2.14.0
github.com/coreos/go-oidc/v3 v3.9.0
github.com/cenkalti/backoff/v4 v4.3.0
github.com/cloudevents/sdk-go/v2 v2.15.2
github.com/coreos/go-oidc/v3 v3.10.0
github.com/dghubble/sling v1.1.0
github.com/docker/distribution v2.8.2+incompatible
github.com/docker/distribution v2.8.3+incompatible
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7
github.com/go-asn1-ber/asn1-ber v1.5.1
github.com/go-ldap/ldap/v3 v3.2.4
github.com/go-openapi/errors v0.20.4
github.com/go-openapi/loads v0.21.2
github.com/go-asn1-ber/asn1-ber v1.5.7
github.com/go-ldap/ldap/v3 v3.4.6
github.com/go-openapi/errors v0.22.0
github.com/go-openapi/loads v0.21.2 // indirect
github.com/go-openapi/runtime v0.26.2
github.com/go-openapi/spec v0.20.11
github.com/go-openapi/strfmt v0.21.8
github.com/go-openapi/swag v0.22.7
github.com/go-openapi/validate v0.22.3
github.com/go-openapi/spec v0.20.11 // indirect
github.com/go-openapi/strfmt v0.23.0
github.com/go-openapi/swag v0.23.0
github.com/go-openapi/validate v0.22.3 // indirect
github.com/go-redis/redis/v8 v8.11.4
github.com/gocarina/gocsv v0.0.0-20210516172204-ca9e8a8ddea8
github.com/gocraft/work v0.5.1
github.com/golang-jwt/jwt/v4 v4.5.0
github.com/golang-migrate/migrate/v4 v4.16.2
github.com/golang-jwt/jwt/v5 v5.2.1
github.com/golang-migrate/migrate/v4 v4.17.1
github.com/gomodule/redigo v2.0.0+incompatible
github.com/google/uuid v1.3.1
github.com/gorilla/csrf v1.6.2
github.com/google/go-containerregistry v0.19.2
github.com/google/uuid v1.6.0
github.com/gorilla/csrf v1.7.2
github.com/gorilla/handlers v1.5.2
github.com/gorilla/mux v1.8.1
github.com/graph-gophers/dataloader v5.0.0+incompatible
github.com/jackc/pgconn v1.14.0
github.com/jackc/pgx/v4 v4.18.1
github.com/jackc/pgconn v1.14.3
github.com/jackc/pgx/v4 v4.18.3
github.com/jpillora/backoff v1.0.0
github.com/ncw/swift v1.0.49 // indirect
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646
github.com/olekukonko/tablewriter v0.0.5
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b
github.com/opencontainers/image-spec v1.1.0
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.17.0
github.com/prometheus/client_golang v1.19.1
github.com/robfig/cron/v3 v3.0.1
github.com/spf13/viper v1.8.1
github.com/stretchr/testify v1.8.4
github.com/tencentcloud/tencentcloud-sdk-go v1.0.62
github.com/spf13/viper v1.19.0
github.com/stretchr/testify v1.9.0
github.com/tencentcloud/tencentcloud-sdk-go v3.0.233+incompatible
github.com/vmihailenco/msgpack/v5 v5.4.1
github.com/volcengine/volcengine-go-sdk v1.0.97
go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux v0.46.1
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0
go.opentelemetry.io/otel v1.21.0
github.com/volcengine/volcengine-go-sdk v1.0.138
go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux v0.51.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0
go.opentelemetry.io/otel v1.28.0
go.opentelemetry.io/otel/exporters/jaeger v1.0.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0
go.opentelemetry.io/otel/sdk v1.21.0
go.opentelemetry.io/otel/trace v1.21.0
go.uber.org/ratelimit v0.2.0
golang.org/x/crypto v0.17.0
golang.org/x/net v0.17.0
golang.org/x/oauth2 v0.13.0
golang.org/x/sync v0.3.0
golang.org/x/text v0.14.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0
go.opentelemetry.io/otel/sdk v1.27.0
go.opentelemetry.io/otel/trace v1.28.0
go.uber.org/ratelimit v0.3.1
golang.org/x/crypto v0.24.0
golang.org/x/net v0.26.0
golang.org/x/oauth2 v0.21.0
golang.org/x/sync v0.7.0
golang.org/x/text v0.16.0
golang.org/x/time v0.5.0
gopkg.in/h2non/gock.v1 v1.1.2
gopkg.in/yaml.v2 v2.4.0
helm.sh/helm/v3 v3.11.3
k8s.io/api v0.29.0
k8s.io/apimachinery v0.29.0
k8s.io/client-go v0.29.0
helm.sh/helm/v3 v3.15.2
k8s.io/api v0.30.2
k8s.io/apimachinery v0.30.2
k8s.io/client-go v0.30.0
sigs.k8s.io/yaml v1.4.0
)
require (
cloud.google.com/go/compute v1.23.0 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
cloud.google.com/go/compute/metadata v0.3.0 // indirect
github.com/Azure/azure-sdk-for-go v37.2.0+incompatible // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.27 // indirect
@ -88,32 +88,36 @@ require (
github.com/Azure/go-autorest/autorest/to v0.3.0 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c // indirect
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible // indirect
github.com/Masterminds/semver/v3 v3.2.0 // indirect
github.com/Masterminds/semver/v3 v3.2.1 // indirect
github.com/Unknwon/goconfig v0.0.0-20160216183935-5f601ca6ef4d // indirect
github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129 // indirect
github.com/benbjohnson/clock v1.3.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/denverdino/aliyungo v0.0.0-20191227032621-df38c6fa730c // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/dnaeon/go-vcr v1.2.0 // indirect
github.com/docker/cli v25.0.1+incompatible // indirect
github.com/docker/docker v25.0.5+incompatible // indirect
github.com/docker/docker-credential-helpers v0.7.0 // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.4.9 // indirect
github.com/go-jose/go-jose/v3 v3.0.1 // indirect
github.com/go-logr/logr v1.3.0 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/go-jose/go-jose/v4 v4.0.1 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/analysis v0.21.4 // indirect
github.com/go-openapi/jsonpointer v0.20.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/golang-jwt/jwt/v4 v4.4.2 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/gorilla/securecookie v1.1.1 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
github.com/gorilla/securecookie v1.1.2 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
@ -123,58 +127,64 @@ require (
github.com/jackc/pgerrcode v0.0.0-20220416144525-469b46aa5efa // indirect
github.com/jackc/pgio v1.0.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgproto3/v2 v2.3.2 // indirect
github.com/jackc/pgproto3/v2 v2.3.3 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/pgtype v1.14.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/magiconair/properties v1.8.5 // indirect
github.com/klauspost/compress v1.17.2 // indirect
github.com/lib/pq v1.10.9 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/oklog/ulid v1.3.1 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pelletier/go-toml v1.9.3 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect
github.com/prometheus/common v0.44.0 // indirect
github.com/prometheus/procfs v0.11.1 // indirect
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.48.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/robfig/cron v1.0.0 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/satori/go.uuid v1.2.0 // indirect
github.com/shiena/ansicolor v0.0.0-20200904210342-c7312218db18 // indirect
github.com/sirupsen/logrus v1.9.2 // indirect
github.com/spf13/afero v1.6.0 // indirect
github.com/spf13/cast v1.5.0 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.11.0 // indirect
github.com/spf13/cast v1.6.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/stretchr/objx v0.5.0 // indirect
github.com/subosito/gotenv v1.2.0 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/vbatts/tar-split v0.11.3 // indirect
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
github.com/volcengine/volc-sdk-golang v1.0.23 // indirect
go.mongodb.org/mongo-driver v1.13.1 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect
go.opentelemetry.io/otel/metric v1.21.0 // indirect
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.19.0 // indirect
golang.org/x/sys v0.15.0 // indirect
golang.org/x/term v0.15.0 // indirect
google.golang.org/api v0.126.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
go.mongodb.org/mongo-driver v1.14.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 // indirect
go.opentelemetry.io/otel/metric v1.28.0 // indirect
go.opentelemetry.io/proto/otlp v1.2.0 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
go.uber.org/zap v1.21.0 // indirect
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
golang.org/x/sys v0.21.0 // indirect
golang.org/x/term v0.21.0 // indirect
google.golang.org/api v0.171.0 // indirect
google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
google.golang.org/grpc v1.59.0 // indirect
google.golang.org/protobuf v1.31.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291 // indirect
google.golang.org/grpc v1.64.1 // indirect
google.golang.org/protobuf v1.34.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.62.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/klog/v2 v2.110.1 // indirect
k8s.io/klog/v2 v2.120.1 // indirect
k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
559
src/go.sum
File diff suppressed because it is too large
@ -22,6 +22,8 @@ const (
// ImageScanJobVendorType is the name of the scan job; it is used as the key to register to the job service.
ImageScanJobVendorType = "IMAGE_SCAN"
// SBOMJobVendorType is the key used to create an SBOM generation execution.
SBOMJobVendorType = "SBOM"
// GarbageCollectionVendorType job name
GarbageCollectionVendorType = "GARBAGE_COLLECTION"
// ReplicationVendorType : the name of the replication job in job service
@ -52,6 +54,7 @@ var (
// executionSweeperCount stores the count for execution retained
executionSweeperCount = map[string]int64{
ImageScanJobVendorType: 1,
SBOMJobVendorType: 1,
ScanAllVendorType: 1,
PurgeAuditVendorType: 10,
ExecSweepVendorType: 10,
@ -47,7 +47,7 @@ func (dbg *DBGetter) Retrieve(logID string) ([]byte, error) {
sz := int64(len(jobLog.Content))
var buf []byte
sizeLimit := logSizeLimit()
if sizeLimit <= 0 {
if sizeLimit <= 0 || sz <= sizeLimit {
buf = []byte(jobLog.Content)
return buf, nil
}
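The widened condition returns the content whole both when no limit is configured and when the log already fits; previously a log smaller than the limit still took the truncation path. A minimal sketch of the guard in isolation (the truncation itself is simplified here, not Harbor's exact behavior):

```go
package main

import "fmt"

// truncateLog mirrors the guard in DBGetter.Retrieve: a non-positive limit
// means "no limit", and content within the limit is returned whole; only
// oversized content is cut down.
func truncateLog(content string, sizeLimit int64) string {
	sz := int64(len(content))
	if sizeLimit <= 0 || sz <= sizeLimit {
		return content
	}
	return content[:sizeLimit]
}

func main() {
	fmt.Println(truncateLog("short log", 0))     // no limit configured
	fmt.Println(truncateLog("short log", 100))   // within limit, returned whole
	fmt.Println(truncateLog("very long log", 4)) // "very"
}
```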
@ -7,6 +7,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/jobservice/config"
"github.com/goharbor/harbor/src/jobservice/logger/backend"
"github.com/goharbor/harbor/src/jobservice/logger/sweeper"
"github.com/goharbor/harbor/src/lib/log"
@ -44,9 +45,11 @@ func TestDBGetter(t *testing.T) {
err = l.Close()
require.NoError(t, err)
_ = config.DefaultConfig.Load("../../config_test.yml", true)
dbGetter := NewDBGetter()
ll, err := dbGetter.Retrieve(uuid)
require.Nil(t, err)
require.NotEqual(t, 0, len(ll))
log.Infof("get logger %s", ll)
err = sweeper.PrepareDBSweep()
@ -36,6 +36,8 @@ import (
_ "github.com/goharbor/harbor/src/pkg/accessory/model/subject"
_ "github.com/goharbor/harbor/src/pkg/config/inmemory"
_ "github.com/goharbor/harbor/src/pkg/config/rest"
_ "github.com/goharbor/harbor/src/pkg/scan/sbom"
_ "github.com/goharbor/harbor/src/pkg/scan/vulnerability"
)
func main() {
@ -1,17 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mgt
//go:generate mockery --name Manager --output . --outpkg mgt --filename mock_manager.go --structname MockManager --inpackage
@ -1,9 +1,10 @@
// Code generated by mockery v2.35.4. DO NOT EDIT.
// Code generated by mockery v2.43.2. DO NOT EDIT.
package mgt
import (
job "github.com/goharbor/harbor/src/jobservice/job"
mock "github.com/stretchr/testify/mock"
query "github.com/goharbor/harbor/src/jobservice/common/query"
@ -18,6 +19,10 @@ type MockManager struct {
func (_m *MockManager) GetJob(jobID string) (*job.Stats, error) {
ret := _m.Called(jobID)
if len(ret) == 0 {
panic("no return value specified for GetJob")
}
var r0 *job.Stats
var r1 error
if rf, ok := ret.Get(0).(func(string) (*job.Stats, error)); ok {
@ -44,6 +49,10 @@ func (_m *MockManager) GetJob(jobID string) (*job.Stats, error) {
func (_m *MockManager) GetJobs(q *query.Parameter) ([]*job.Stats, int64, error) {
ret := _m.Called(q)
if len(ret) == 0 {
panic("no return value specified for GetJobs")
}
var r0 []*job.Stats
var r1 int64
var r2 error
@ -77,6 +86,10 @@ func (_m *MockManager) GetJobs(q *query.Parameter) ([]*job.Stats, int64, error)
func (_m *MockManager) GetPeriodicExecution(pID string, q *query.Parameter) ([]*job.Stats, int64, error) {
ret := _m.Called(pID, q)
if len(ret) == 0 {
panic("no return value specified for GetPeriodicExecution")
}
var r0 []*job.Stats
var r1 int64
var r2 error
@ -110,6 +123,10 @@ func (_m *MockManager) GetPeriodicExecution(pID string, q *query.Parameter) ([]*
func (_m *MockManager) GetScheduledJobs(q *query.Parameter) ([]*job.Stats, int64, error) {
ret := _m.Called(q)
if len(ret) == 0 {
panic("no return value specified for GetScheduledJobs")
}
var r0 []*job.Stats
var r1 int64
var r2 error
@ -143,6 +160,10 @@ func (_m *MockManager) GetScheduledJobs(q *query.Parameter) ([]*job.Stats, int64
func (_m *MockManager) SaveJob(_a0 *job.Stats) error {
ret := _m.Called(_a0)
if len(ret) == 0 {
panic("no return value specified for SaveJob")
}
var r0 error
if rf, ok := ret.Get(0).(func(*job.Stats) error); ok {
r0 = rf(_a0)
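mockery v2.43.x generates the `if len(ret) == 0 { panic(...) }` guards seen above, so an unstubbed call fails fast with the method name instead of an opaque index panic deeper in the mock. A toy reproduction of that behavior, independent of testify/mockery:

```go
package main

import "fmt"

// call simulates the generated guard: when a method is invoked without a
// matching .On(...) expectation, ret is empty and the mock panics with the
// method name rather than failing later on an index-out-of-range.
func call(ret []interface{}, method string) interface{} {
	if len(ret) == 0 {
		panic(fmt.Sprintf("no return value specified for %s", method))
	}
	return ret[0]
}

func main() {
	fmt.Println(call([]interface{}{int64(7)}, "Schedule")) // configured: prints 7
	// call(nil, "Schedule") would panic with a descriptive message.
}
```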
@ -1,17 +0,0 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package period
//go:generate mockery --name Scheduler --output . --outpkg period --filename mock_scheduler.go --structname MockScheduler --inpackage
@ -1,4 +1,4 @@
// Code generated by mockery v2.35.4. DO NOT EDIT.
// Code generated by mockery v2.43.2. DO NOT EDIT.
package period
@ -13,6 +13,10 @@ type MockScheduler struct {
func (_m *MockScheduler) Schedule(policy *Policy) (int64, error) {
ret := _m.Called(policy)
if len(ret) == 0 {
panic("no return value specified for Schedule")
}
var r0 int64
var r1 error
if rf, ok := ret.Get(0).(func(*Policy) (int64, error)); ok {
@ -42,6 +46,10 @@ func (_m *MockScheduler) Start() {
func (_m *MockScheduler) UnSchedule(policyID string) error {
ret := _m.Called(policyID)
if len(ret) == 0 {
panic("no return value specified for UnSchedule")
}
var r0 error
if rf, ok := ret.Get(0).(func(string) error); ok {
r0 = rf(policyID)
2
src/lib/cache/cache.go
vendored
@ -47,8 +47,6 @@ type Iterator interface {
Val() string
}
//go:generate mockery --name Cache --output . --outpkg cache --filename mock_cache_test.go --structname mockCache --inpackage
// Cache cache interface
type Cache interface {
// Contains returns true if key exists
Some files were not shown because too many files have changed in this diff