mirror of https://github.com/goharbor/harbor.git
Compare commits
182 Commits
Author | SHA1 | Date |
---|---|---|
stonezdj(Daojun Zhang) | 232f9ba7ea | |
Wang Yan | 65e266fecf | |
MinerYang | 068ae006fe | |
Shengwen YU | beb5f3f7cb | |
Shengwen YU | 33966fbc79 | |
dependabot[bot] | c4409c053b | |
dependabot[bot] | 1ef61995b8 | |
dependabot[bot] | 34cb462cd9 | |
dependabot[bot] | 132c389216 | |
dependabot[bot] | 50dc773a5a | |
stonezdj(Daojun Zhang) | 8431c9c30a | |
MinerYang | d01dfd450a | |
stonezdj(Daojun Zhang) | d154c27362 | |
Lichao Xue | 9b5dd7951e | |
Shengwen YU | bc3e47f0fe | |
dependabot[bot] | 1146cbeca1 | |
dependabot[bot] | 01a28dc66d | |
dependabot[bot] | 7306f6d7d9 | |
dependabot[bot] | d7ab8254cc | |
stonezdj(Daojun Zhang) | fba4c40c65 | |
Shengwen YU | 9471f5d5a6 | |
Lichao Xue | dee73a44f3 | |
Shengwen YU | c791b39a26 | |
Shengwen YU | 822784aac8 | |
Shengwen YU | d0cb200ed5 | |
Shengwen YU | 0e8dce72be | |
stonezdj(Daojun Zhang) | ec8d692fe6 | |
Shengwen YU | 2af02f3b25 | |
stonezdj(Daojun Zhang) | c80e9bf477 | |
stonezdj(Daojun Zhang) | b7d4bf0d07 | |
MinerYang | ea3cd06171 | |
Shengwen YU | 9b164f3fee | |
Lichao Xue | e7fce62723 | |
stonezdj(Daojun Zhang) | d759429831 | |
stonezdj(Daojun Zhang) | 0d9dc4b4a4 | |
Lichao Xue | b3dc183f47 | |
stonezdj(Daojun Zhang) | 9c3fc28250 | |
Lichao Xue | e8907a47ab | |
Ikko Eltociear Ashimine | 4fd11ce072 | |
stonezdj(Daojun Zhang) | 2ea7d09412 | |
stonezdj(Daojun Zhang) | fb2e0042d0 | |
Shengwen YU | 6709c789fb | |
stonezdj(Daojun Zhang) | 654aa8edcf | |
Shengwen YU | 67c03ddc4f | |
Wang Yan | 550bf1d750 | |
Shengwen YU | 91efec1e2a | |
dependabot[bot] | 938c804513 | |
Iceber Gu | a2507dc3fc | |
dependabot[bot] | 79dbebd48d | |
dependabot[bot] | b8392968ac | |
dependabot[bot] | 8bf26c0d1d | |
MinerYang | 7465a29919 | |
MinerYang | 7e8032b144 | |
MinerYang | e9d2f50669 | |
Shengwen YU | 643e84cdfe | |
Shengwen YU | 4c9e84cae1 | |
stonezdj(Daojun Zhang) | 5d7c668028 | |
stonezdj(Daojun Zhang) | 89995075a7 | |
tostt | a858fb4f4d | |
MinerYang | 2bb5166c80 | |
stonezdj(Daojun Zhang) | 2e7db335b3 | |
MinerYang | 03d9575d84 | |
Wang Yan | 461a5fa50d | |
stonezdj(Daojun Zhang) | be648ea47f | |
guangwu | ff1a5056d7 | |
stonezdj(Daojun Zhang) | 96ba34a93c | |
MinerYang | 389a8c49f4 | |
Shengwen YU | c8370faeeb | |
Shengwen YU | c12064df4e | |
Prima Adi Pradana | 7b8a322a88 | |
stonezdj(Daojun Zhang) | dd76fe47ce | |
stonezdj(Daojun Zhang) | b6366e03e9 | |
guangwu | 9778176ff1 | |
stonezdj(Daojun Zhang) | cea47c7db3 | |
MinerYang | 680a6a828b | |
guangwu | b66d14d9f3 | |
Taras Katkov | da3c85be5a | |
dependabot[bot] | 7c2158bdf9 | |
dependabot[bot] | 6c2cafe7ba | |
dependabot[bot] | 290b22cf17 | |
dependabot[bot] | 6a0ee091d8 | |
dependabot[bot] | ebb8050068 | |
dependabot[bot] | ba840c20d4 | |
dependabot[bot] | 9beede0d82 | |
dependabot[bot] | 4acde986a9 | |
dependabot[bot] | 8b8b88d86a | |
dependabot[bot] | d58172c112 | |
dependabot[bot] | b9659b455b | |
Todd Whiteman | 06f53368cd | |
Lichao Xue | aa4a142bc1 | |
James Kang | fd81e7c43e | |
stonezdj(Daojun Zhang) | 80a9c688fc | |
Wang Yan | 2eb5464603 | |
Shengwen YU | fa01cc5e48 | |
okestro-yj.yoo | 69fc957d7e | |
MinerYang | f7a3392020 | |
MinerYang | a269b4f31c | |
Wang Yan | dbe9790147 | |
jm-nab | 8bec57ffd4 | |
guangwu | 6ca30a3732 | |
Lichao Xue | 9b7c1a2274 | |
stonezdj(Daojun Zhang) | 950fc06a87 | |
Wang Yan | d25f3556a9 | |
Wang Yan | 3782bab80a | |
Lichao Xue | d79e4b1176 | |
stonezdj(Daojun Zhang) | 29cdc398e0 | |
Lichao Xue | 47546a5f9d | |
stonezdj(Daojun Zhang) | 2b6608fb52 | |
dependabot[bot] | c5790ced14 | |
dependabot[bot] | 2fd4588782 | |
stonezdj(Daojun Zhang) | 5b832c1724 | |
dependabot[bot] | 056c41fd80 | |
dependabot[bot] | cb04005098 | |
dependabot[bot] | 35f98344e6 | |
stonezdj(Daojun Zhang) | 54819ba8cd | |
Shijun Sun | fa6b13871f | |
Antoine Jouve | 73c2884e58 | |
Shengwen YU | bca9b14bbf | |
tostt | 0e580836bb | |
Shengwen YU | 1e85cab33a | |
MinerYang | 84b31aaf7f | |
dependabot[bot] | f910c5654b | |
dependabot[bot] | db20b3b6ac | |
dependabot[bot] | 7cfc685b7a | |
dependabot[bot] | f562c3016d | |
dependabot[bot] | 42256ed331 | |
dependabot[bot] | dbbc0207d9 | |
MinerYang | a3e1b1eb79 | |
Shengwen YU | fb1e828547 | |
Shengwen YU | 09d1f8e9fc | |
MinerYang | 522f96b5cf | |
Yang Jiao | 0db0d217a7 | |
Shengwen YU | d88a32089a | |
Bin Liu | 5a576174b1 | |
zycupup | ee6f61c502 | |
dependabot[bot] | 6d854a5534 | |
dependabot[bot] | 6b1e5d2312 | |
dependabot[bot] | 6f6e85863e | |
ShengqiWang | caaa641521 | |
dependabot[bot] | 93d0d2982a | |
Yang Jiao | 308c6cf657 | |
Ha Son Hai | 7e0f6cc501 | |
Shuaiyi | f17d90fadf | |
Wang Yan | fdc012c237 | |
ShengqiWang | 04a140332e | |
Yang Jiao | eb125419cc | |
Wang Yan | 8c0f177299 | |
Yang Jiao | 80930daaac | |
stonezdj(Daojun Zhang) | 891f6785f2 | |
Yang Jiao | 7268a3f3e2 | |
Lars Lehtonen | 49c5a068ad | |
Maksym Trofimenko | 9e5efc99e8 | |
dependabot[bot] | f8d2169712 | |
dependabot[bot] | bb2581c669 | |
Yang Jiao | 64a2296b58 | |
dependabot[bot] | de7ea2849e | |
dependabot[bot] | 88a4cabcaf | |
dependabot[bot] | 378ff62350 | |
Liang Zheng | 4378c15b1c | |
Yang Jiao | aefe1bce9d | |
ShengqiWang | ed4587b491 | |
Blueswen | 0d157f2254 | |
Yang Jiao | e397e86478 | |
ShengqiWang | 923295c990 | |
dependabot[bot] | 49ee3b7759 | |
Yang Jiao | 3d5c3df3cf | |
Yang Jiao | bc63e77cf1 | |
Yang Jiao | 0be4c4cf4c | |
Yang Jiao | 38d3b3512b | |
Yang Jiao | 9a084ec4dd | |
Yang Jiao | 6793da72e3 | |
dependabot[bot] | 2f2a6462ad | |
dependabot[bot] | 41adc7508a | |
dependabot[bot] | 991b2a8174 | |
dependabot[bot] | dcd3c3dbfd | |
Raúl Garcia Sanchez | 09d15969d7 | |
dependabot[bot] | ac03147e08 | |
dependabot[bot] | 80b3ea5501 | |
MinerYang | b38de22054 | |
dependabot[bot] | 5f828ea72f | |
Yang Jiao | a534094ec0 | |
dependabot[bot] | 2532ffe5a8 |

@@ -41,10 +41,10 @@ jobs:
- ubuntu-latest
timeout-minutes: 100
steps:
- name: Set up Go 1.21
- name: Set up Go 1.22
uses: actions/setup-go@v5
with:
go-version: 1.21.4
go-version: 1.22.2
id: go
- uses: actions/checkout@v3
with:
@@ -89,7 +89,7 @@ jobs:
bash ./tests/showtime.sh ./tests/ci/ut_run.sh $IP
df -h
- name: Codecov For BackEnd
uses: codecov/codecov-action@v3
uses: codecov/codecov-action@v4
with:
file: ./src/github.com/goharbor/harbor/profile.cov
flags: unittests
@@ -102,10 +102,10 @@ jobs:
- ubuntu-latest
timeout-minutes: 100
steps:
- name: Set up Go 1.21
- name: Set up Go 1.22
uses: actions/setup-go@v5
with:
go-version: 1.21.4
go-version: 1.22.2
id: go
- uses: actions/checkout@v3
with:
@@ -157,10 +157,10 @@ jobs:
- ubuntu-latest
timeout-minutes: 100
steps:
- name: Set up Go 1.21
- name: Set up Go 1.22
uses: actions/setup-go@v5
with:
go-version: 1.21.4
go-version: 1.22.2
id: go
- uses: actions/checkout@v3
with:
@@ -212,10 +212,10 @@ jobs:
- ubuntu-latest
timeout-minutes: 100
steps:
- name: Set up Go 1.21
- name: Set up Go 1.22
uses: actions/setup-go@v5
with:
go-version: 1.21.4
go-version: 1.22.2
id: go
- uses: actions/checkout@v3
with:
@@ -265,10 +265,10 @@ jobs:
- ubuntu-latest
timeout-minutes: 100
steps:
- name: Set up Go 1.21
- name: Set up Go 1.22
uses: actions/setup-go@v5
with:
go-version: 1.21.4
go-version: 1.22.2
id: go
- uses: actions/checkout@v3
with:
@@ -331,7 +331,7 @@ jobs:
bash ./tests/showtime.sh ./tests/ci/ui_ut_run.sh
df -h
- name: Codecov For UI
uses: codecov/codecov-action@v3
uses: codecov/codecov-action@v4
with:
file: ./src/github.com/goharbor/harbor/src/portal/coverage/lcov.info
flags: unittests

@@ -13,6 +13,6 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Set the author of a PR as the assignee
uses: kentaro-m/auto-assign-action@v1.2.5
uses: kentaro-m/auto-assign-action@v2.0.0
with:
configuration-path: ".github/auto-assignees.yml"

@@ -19,14 +19,14 @@ jobs:
- uses: 'google-github-actions/auth@v2'
with:
credentials_json: '${{ secrets.GCP_CREDENTIALS }}'
- uses: google-github-actions/setup-gcloud@v1
- uses: google-github-actions/setup-gcloud@v2
with:
version: '430.0.0'
- run: gcloud info
- name: Set up Go 1.21
- name: Set up Go 1.22
uses: actions/setup-go@v5
with:
go-version: 1.21.4
go-version: 1.22.2
id: go
- name: Setup Docker
uses: docker-practice/actions-setup-docker@master

@@ -26,7 +26,7 @@ jobs:

# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
uses: github/codeql-action/init@v3
# Override language selection by uncommenting this and choosing your languages
# with:
# languages: go, javascript, csharp, python, cpp, java
@@ -47,5 +47,8 @@ jobs:
# make bootstrap
# make release

# to make sure autobuild success, specifify golang version in go.mod
# https://github.com/github/codeql/issues/15647#issuecomment-2003768106

- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2
uses: github/codeql-action/analyze@v3

@@ -23,12 +23,12 @@ jobs:
uses: google-github-actions/auth@v2
with:
credentials_json: '${{ secrets.GCP_CREDENTIALS }}'
- uses: google-github-actions/setup-gcloud@v1
- uses: google-github-actions/setup-gcloud@v2
- run: gcloud info
- name: Set up Go 1.21
uses: actions/setup-go@v5
with:
go-version: 1.21.4
go-version: 1.22.2
id: go
- uses: actions/checkout@v3
with:

@@ -7,7 +7,7 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v8.0.0
- uses: actions/stale@v9.0.0
with:
stale-issue-message: 'This issue is being marked stale due to a period of inactivity. If this issue is still relevant, please comment or remove the stale label. Otherwise, this issue will close in 30 days.'
stale-pr-message: 'This PR is being marked stale due to a period of inactivty. If this PR is still relevant, please comment or remove the stale label. Otherwise, this PR will close in 30 days.'

@@ -12,7 +12,7 @@ jobs:
matrix:
# maintain the versions of harbor that need to be actively
# security scanned
versions: [dev, v2.9.0-dev]
versions: [dev, v2.10.0-dev]
# list of images that need to be scanned
images: [harbor-core, harbor-db, harbor-exporter, harbor-jobservice, harbor-log, harbor-portal, harbor-registryctl, prepare]
permissions:
@@ -32,6 +32,6 @@ jobs:
output: 'trivy-results.sarif'

- name: Upload Trivy scan results to GitHub Security tab
uses: github/codeql-action/upload-sarif@v2
uses: github/codeql-action/upload-sarif@v3
with:
sarif_file: 'trivy-results.sarif'

@@ -22,7 +22,7 @@ jobs:
- uses: 'google-github-actions/auth@v2'
with:
credentials_json: '${{ secrets.GCP_CREDENTIALS }}'
- uses: google-github-actions/setup-gcloud@v1
- uses: google-github-actions/setup-gcloud@v2
with:
version: '430.0.0'
- name: Prepare Assets
@@ -68,7 +68,7 @@ jobs:
source tools/release/release_utils.sh && generateReleaseNotes ${{ env.CUR_TAG }} ${{ env.PRE_TAG }} ${{ secrets.GITHUB_TOKEN }} $release_notes_path
echo "RELEASE_NOTES_PATH=$release_notes_path" >> $GITHUB_ENV
- name: RC Release
uses: softprops/action-gh-release@v1
uses: softprops/action-gh-release@v2
if: ${{ env.PRERELEASE == 'true' }}
with:
body_path: ${{ env.RELEASE_NOTES_PATH }}
@@ -77,7 +77,7 @@ jobs:
${{ env.OFFLINE_PACKAGE_PATH }}.asc
${{ env.MD5SUM_PATH }}
- name: GA Release
uses: softprops/action-gh-release@v1
uses: softprops/action-gh-release@v2
if: ${{ env.PRERELEASE == 'false' }}
with:
body_path: ${{ env.RELEASE_NOTES_PATH }}

@@ -56,3 +56,5 @@ src/server/v2.0/models/
src/server/v2.0/restapi/
.editorconfig

harborclient/
openapi-generator-cli.jar

@@ -39,6 +39,7 @@ be added to this list as they transition to production deployments.
<a href="https://www.dynatrace.com/" target="_blank" border="0"><img alt="Dynatrace" src="https://raw.githubusercontent.com/goharbor/website/main/static/img/logos/users-partners/dynatrace-logo.png"></a>
<a href="https://www.home.cern/" target="_blank" border="0">CERN</a>
<a href="https://www.ns.nl/" target="_blank" border="0"><img alt="Nederlandse Spoorwegen" src="https://raw.githubusercontent.com/goharbor/website/main/docs/img/adopters/nederlandse-spoorwegen.png" height="40"></a>
<a href="https://www.de-cix.net/" target="_blank" border="0"><img alt="DE-CIX" src="https://raw.githubusercontent.com/goharbor/website/main/docs/img/adopters/de-cix.png" height="50"></a>

## Success Stories

@@ -88,6 +89,8 @@ feature within Harbor before deploying images into production.
and scan customized container images for different business applications, like
ELK stack, as part of their CI/CD pipeline.

**DE-CIX:** Harbor has been integrated into the application stack to replace the former hosted Docker registry, now known as the Distribution Registry. With Harbor, we have started separating access to project-related images using OIDC group mapping and robot accounts with dedicated permissions. Another significant benefit comes with the implemented vulnerability scanner, which makes vulnerabilities more transparent to our teams.

## Adding your logo

If you would like to add your logo here and to the `Users and Partners of Harbor` section of the website, add a PNG or SVG version of your logo to the [adopters](https://github.com/goharbor/website/tree/main/docs/img/adopters) directory of the [website](https://github.com/goharbor/website) and submit a pull request with your change. Name the image file something that reflects your company (e.g., if your company is called Acme, name the image acme.png). We will follow up and make the change in the goharbor.io website as well.

@@ -164,7 +164,8 @@ Harbor backend is written in [Go](http://golang.org/). If you don't have a Harbo
| 2.7 | 1.19.4 |
| 2.8 | 1.20.6 |
| 2.9 | 1.21.3 |
| 2.10 | 1.21.4 |
| 2.10 | 1.21.8 |
| 2.11 | 1.22.2 |

Ensure your GOPATH and PATH have been configured in accordance with the Go environment instructions.

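For reference, a quick way to confirm a local toolchain lines up with the table above (a minimal sketch; per the 2.11 row, the main branch targets the Go 1.22 series):

```bash
# check the locally installed Go toolchain against the compatibility table above
go version        # expect go1.22.x when building the main branch (Harbor 2.11)
go env GOPATH     # GOPATH/bin should also be on PATH for tools such as golangci-lint and mockery
```
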
Makefile (20 changed lines)

@@ -104,8 +104,8 @@ PREPARE_VERSION_NAME=versions

#versions
REGISTRYVERSION=v2.8.3-patch-redis
TRIVYVERSION=v0.47.0
TRIVYADAPTERVERSION=v0.30.19
TRIVYVERSION=v0.50.4
TRIVYADAPTERVERSION=v0.31.1

# version of registry for pulling the source code
REGISTRY_SRC_TAG=v2.8.3
@@ -140,7 +140,7 @@ GOINSTALL=$(GOCMD) install
GOTEST=$(GOCMD) test
GODEP=$(GOTEST) -i
GOFMT=gofmt -w
GOBUILDIMAGE=golang:1.21.4
GOBUILDIMAGE=golang:1.22.2
GOBUILDPATHINCONTAINER=/harbor

# go build
@@ -312,7 +312,7 @@ gen_apis: lint_apis


MOCKERY_IMAGENAME=$(IMAGENAMESPACE)/mockery
MOCKERY_VERSION=v2.35.4
MOCKERY_VERSION=v2.42.2
MOCKERY=$(RUNCONTAINER) ${MOCKERY_IMAGENAME}:${MOCKERY_VERSION}
MOCKERY_IMAGE_BUILD_CMD=${DOCKERBUILD} -f ${TOOLSPATH}/mockery/Dockerfile --build-arg GOLANG=${GOBUILDIMAGE} --build-arg MOCKERY_VERSION=${MOCKERY_VERSION} -t ${MOCKERY_IMAGENAME}:$(MOCKERY_VERSION) .

@@ -452,16 +452,6 @@ package_offline: update_prepare_version compile build
@rm -rf $(HARBORPKG)
@echo "Done."

gosec:
#go get github.com/securego/gosec/cmd/gosec
#go get github.com/dghubble/sling
@echo "run secure go scan ..."
@if [ "$(GOSECRESULTS)" != "" ] ; then \
$(GOPATH)/bin/gosec -fmt=json -out=$(GOSECRESULTS) -quiet ./... | true ; \
else \
$(GOPATH)/bin/gosec -fmt=json -out=harbor_gas_output.json -quiet ./... | true ; \
fi

go_check: gen_apis mocks_check misspell commentfmt lint

commentfmt:
@@ -479,7 +469,7 @@ misspell:
@find . -type d \( -path ./tests \) -prune -o -name '*.go' -print | xargs misspell -error

# golangci-lint binary installation or refer to https://golangci-lint.run/usage/install/#local-installation
# curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.51.2
# curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.55.2
GOLANGCI_LINT := $(shell go env GOPATH)/bin/golangci-lint
lint:
@echo checking lint

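As a usage note, a hedged sketch of the check targets touched in the hunks above, as they would typically be run locally:

```bash
# run the code checks wired up in the Makefile shown above
make lint          # golangci-lint via the GOLANGCI_LINT binary
make misspell      # spell check over *.go files
make go_check      # gen_apis, mocks_check, misspell, commentfmt and lint combined
```
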
README.md (10 changed lines)

@@ -33,8 +33,8 @@ Harbor is hosted by the [Cloud Native Computing Foundation](https://cncf.io) (CN
* **Role based access control**: Users access different repositories through 'projects' and a user can have different permission for images or Helm charts under a project.
* **Policy based replication**: Images and charts can be replicated (synchronized) between multiple registry instances based on policies with using filters (repository, tag and label). Harbor automatically retries a replication if it encounters any errors. This can be used to assist loadbalancing, achieve high availability, and facilitate multi-datacenter deployments in hybrid and multi-cloud scenarios.
* **Vulnerability Scanning**: Harbor scans images regularly for vulnerabilities and has policy checks to prevent vulnerable images from being deployed.
* **LDAP/AD support**: Harbor integrates with existing enterprise LDAP/AD for user authentication and management, and supports importing LDAP groups into Harbor that can then be given permissions to specific projects.
* **OIDC support**: Harbor leverages OpenID Connect (OIDC) to verify the identity of users authenticated by an external authorization server or identity provider. Single sign-on can be enabled to log into the Harbor portal.
* **LDAP/AD support**: Harbor integrates with existing enterprise LDAP/AD for user authentication and management, and supports importing LDAP groups into Harbor that can then be given permissions to specific projects.
* **OIDC support**: Harbor leverages OpenID Connect (OIDC) to verify the identity of users authenticated by an external authorization server or identity provider. Single sign-on can be enabled to log into the Harbor portal.
* **Image deletion & garbage collection**: System admin can run garbage collection jobs so that images(dangling manifests and unreferenced blobs) can be deleted and their space can be freed up periodically.
* **Notary**: Support signing container images using Docker Content Trust (leveraging Notary) for guaranteeing authenticity and provenance. In addition, policies that prevent unsigned images from being deployed can also be activated.
* **Graphical user portal**: User can easily browse, search repositories and manage projects.
@@ -55,7 +55,7 @@ For learning the architecture design of Harbor, check the document [Architecture

**System requirements:**

**On a Linux host:** docker 17.06.0-ce+ and docker-compose 1.18.0+ .
**On a Linux host:** docker 20.10.10-ce+ and docker-compose 1.18.0+ .

Download binaries of **[Harbor release ](https://github.com/vmware/harbor/releases)** and follow **[Installation & Configuration Guide](https://goharbor.io/docs/latest/install-config/)** to install Harbor.

@@ -77,8 +77,8 @@ The [compatibility list](https://goharbor.io/docs/edge/install-config/harbor-com

## Community

* **Twitter:** [@project_harbor](https://twitter.com/project_harbor)
* **User Group:** Join Harbor user email group: [harbor-users@lists.cncf.io](https://lists.cncf.io/g/harbor-users) to get update of Harbor's news, features, releases, or to provide suggestion and feedback.
* **Twitter:** [@project_harbor](https://twitter.com/project_harbor)
* **User Group:** Join Harbor user email group: [harbor-users@lists.cncf.io](https://lists.cncf.io/g/harbor-users) to get update of Harbor's news, features, releases, or to provide suggestion and feedback.
* **Developer Group:** Join Harbor developer group: [harbor-dev@lists.cncf.io](https://lists.cncf.io/g/harbor-dev) for discussion on Harbor development and contribution.
* **Slack:** Join Harbor's community for discussion and ask questions: [Cloud Native Computing Foundation](https://slack.cncf.io/), channel: [#harbor](https://cloud-native.slack.com/messages/harbor/) and [#harbor-dev](https://cloud-native.slack.com/messages/harbor-dev/)

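To make the quick-start above concrete, a hedged sketch of the documented offline-installer flow (the version in the file name is a placeholder; take the actual asset from the releases page):

```bash
# download and unpack an offline installer asset (the version below is a placeholder)
curl -LO https://github.com/goharbor/harbor/releases/download/v2.11.0/harbor-offline-installer-v2.11.0.tgz
tar xzvf harbor-offline-installer-v2.11.0.tgz
cd harbor
cp harbor.yml.tmpl harbor.yml     # set hostname and the https certificate/private_key paths
sudo ./install.sh                 # requires docker 20.10.10+ and docker-compose 1.18.0+
```
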
RELEASES.md (10 changed lines)

@@ -14,11 +14,11 @@ Patch releases are based on the major/minor release branch, the release cadence
`Pre-releases:mainly the different RC builds` will be compiled from their corresponding branches. Please note they are done to assist in the stabilization process, no guarantees are provided.

### Minor Release Support Matrix
| Version | Supported |
|---------------| ------------------ |
| Harbor v2.9.x | :white_check_mark: |
| Harbor v2.8.x | :white_check_mark: |
| Harbor v2.7.x | :white_check_mark: |
| Version | Supported |
|----------------| ------------------ |
| Harbor v2.10.x | :white_check_mark: |
| Harbor v2.9.x | :white_check_mark: |
| Harbor v2.8.x | :white_check_mark: |

### Upgrade path and support policy
The upgrade path for Harbor is (1) 2.2.x patch releases are always compatible with its major and minor version. For example, previous released 2.2.x can be upgraded to most recent 2.2.3 release. (2) Harbor only supports two previous minor releases to upgrade to current minor release. For example, 2.3.0 will only support 2.1.0 and 2.2.0 to upgrade from, 2.0.0 to 2.3.0 is not supported. One should upgrade to 2.2.0 first, then to 2.3.0.

@@ -991,6 +991,12 @@ paths:
type: boolean
required: false
default: false
- name: with_sbom_overview
in: query
description: Specify whether the SBOM overview is included in returning artifacts, when this option is true, the SBOM overview will be included in the response
type: boolean
required: false
default: false
- name: with_signature
in: query
description: Specify whether the signature is included inside the tags of the returning artifacts. Only works when setting "with_tag=true"
@@ -1096,6 +1102,12 @@ paths:
type: boolean
required: false
default: false
- name: with_sbom_overview
in: query
description: Specify whether the SBOM overview is included in returning artifact, when this option is true, the SBOM overview will be included in the response
type: boolean
required: false
default: false
- name: with_accessory
in: query
description: Specify whether the accessories are included of the returning artifacts.
@@ -1164,6 +1176,11 @@ paths:
- $ref: '#/parameters/projectName'
- $ref: '#/parameters/repositoryName'
- $ref: '#/parameters/reference'
- name: scanType
in: body
required: false
schema:
$ref: '#/definitions/ScanType'
responses:
'202':
$ref: '#/responses/202'

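A hedged illustration of how these additions are used against the v2.0 API (host, project, repository, credentials and digest are placeholders; the scan endpoint is assumed to use its usual POST verb and the /api/v2.0 base path):

```bash
# ask Harbor to generate an SBOM instead of a vulnerability report for one artifact
curl -u admin:Harbor12345 -X POST \
  -H "Content-Type: application/json" \
  -d '{"scan_type": "sbom"}' \
  "https://harbor.example.com/api/v2.0/projects/library/repositories/nginx/artifacts/sha256:<digest>/scan"
# a 202 response means the job was accepted

# later, include the SBOM overview (scan_status, sbom_digest, report_id, ...) when listing artifacts
curl -u admin:Harbor12345 \
  "https://harbor.example.com/api/v2.0/projects/library/repositories/nginx/artifacts?with_sbom_overview=true"
```
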
@ -1175,6 +1192,8 @@ paths:
|
|||
$ref: '#/responses/403'
|
||||
'404':
|
||||
$ref: '#/responses/404'
|
||||
'422':
|
||||
$ref: '#/responses/422'
|
||||
'500':
|
||||
$ref: '#/responses/500'
|
||||
/projects/{project_name}/repositories/{repository_name}/artifacts/{reference}/scan/stop:
|
||||
|
@ -1189,6 +1208,12 @@ paths:
|
|||
- $ref: '#/parameters/projectName'
|
||||
- $ref: '#/parameters/repositoryName'
|
||||
- $ref: '#/parameters/reference'
|
||||
- name: scanType
|
||||
in: body
|
||||
required: true
|
||||
schema:
|
||||
$ref: '#/definitions/ScanType'
|
||||
description: 'The scan type: Vulnerabilities, SBOM'
|
||||
responses:
|
||||
'202':
|
||||
$ref: '#/responses/202'
|
||||
|
@ -1200,6 +1225,8 @@ paths:
|
|||
$ref: '#/responses/403'
|
||||
'404':
|
||||
$ref: '#/responses/404'
|
||||
'422':
|
||||
$ref: '#/responses/422'
|
||||
'500':
|
||||
$ref: '#/responses/500'
|
||||
/projects/{project_name}/repositories/{repository_name}/artifacts/{reference}/scan/{report_id}/log:
|
||||
|
@ -1226,6 +1253,8 @@ paths:
|
|||
description: Successfully get scan log file
|
||||
schema:
|
||||
type: string
|
||||
'400':
|
||||
$ref: '#/responses/400'
|
||||
'401':
|
||||
$ref: '#/responses/401'
|
||||
'403':
|
||||
|
@ -1432,7 +1461,7 @@ paths:
|
|||
in: path
|
||||
description: The type of addition.
|
||||
type: string
|
||||
enum: [build_history, values.yaml, readme.md, dependencies]
|
||||
enum: [build_history, values.yaml, readme.md, dependencies, sbom]
|
||||
required: true
|
||||
responses:
|
||||
'200':
|
||||
|
@ -1451,6 +1480,8 @@ paths:
|
|||
$ref: '#/responses/403'
|
||||
'404':
|
||||
$ref: '#/responses/404'
|
||||
'422':
|
||||
$ref: '#/responses/422'
|
||||
'500':
|
||||
$ref: '#/responses/500'
|
||||
/projects/{project_name}/repositories/{repository_name}/artifacts/{reference}/labels:
|
||||
|
@ -4719,7 +4750,7 @@ paths:
|
|||
summary: Get job log by job id
|
||||
description: Get job log by job id, it is only used by administrator
|
||||
produces:
|
||||
- text/plain
|
||||
- text/plain
|
||||
tags:
|
||||
- jobservice
|
||||
parameters:
|
||||
|
@ -4798,6 +4829,8 @@ paths:
|
|||
$ref: '#/responses/403'
|
||||
'404':
|
||||
$ref: '#/responses/404'
|
||||
'422':
|
||||
$ref: '#/responses/422'
|
||||
'500':
|
||||
$ref: '#/responses/500'
|
||||
/schedules:
|
||||
|
@ -4850,7 +4883,6 @@ paths:
|
|||
'200':
|
||||
description: Get scheduler status successfully.
|
||||
schema:
|
||||
type: object
|
||||
$ref: '#/definitions/SchedulerStatus'
|
||||
'401':
|
||||
$ref: '#/responses/401'
|
||||
|
@ -6072,7 +6104,7 @@ paths:
|
|||
description: Specify whether the dangerous Artifact are included inside summary information
|
||||
type: boolean
|
||||
required: false
|
||||
default: false
|
||||
default: false
|
||||
responses:
|
||||
'200':
|
||||
description: Success
|
||||
|
@ -6091,15 +6123,15 @@ paths:
|
|||
get:
|
||||
summary: Get the vulnerability list.
|
||||
description: |
|
||||
Get the vulnerability list. use q to pass the query condition,
|
||||
Get the vulnerability list. use q to pass the query condition,
|
||||
supported conditions:
|
||||
cve_id(exact match)
|
||||
cvss_score_v3(range condition)
|
||||
severity(exact match)
|
||||
repository_name(exact match)
|
||||
project_id(exact match)
|
||||
repository_name(exact match)
|
||||
project_id(exact match)
|
||||
package(exact match)
|
||||
tag(exact match)
|
||||
tag(exact match)
|
||||
digest(exact match)
|
||||
tags:
|
||||
- securityhub
|
||||
|
@ -6432,6 +6464,14 @@ responses:
|
|||
type: string
|
||||
schema:
|
||||
$ref: '#/definitions/Errors'
|
||||
'422':
|
||||
description: Unsupported Type
|
||||
headers:
|
||||
X-Request-Id:
|
||||
description: The ID of the corresponding request for the response
|
||||
type: string
|
||||
schema:
|
||||
$ref: '#/definitions/Errors'
|
||||
'500':
|
||||
description: Internal server error
|
||||
headers:
|
||||
|
@ -6593,6 +6633,9 @@ definitions:
|
|||
scan_overview:
|
||||
$ref: '#/definitions/ScanOverview'
|
||||
description: The overview of the scan result.
|
||||
sbom_overview:
|
||||
$ref: '#/definitions/SBOMOverview'
|
||||
description: The overview of the generating SBOM progress
|
||||
accessories:
|
||||
type: array
|
||||
items:
|
||||
|
@@ -6744,6 +6787,37 @@ definitions:
description: 'The scan overview attached in the metadata of tag'
additionalProperties:
$ref: '#/definitions/NativeReportSummary'
SBOMOverview:
type: object
description: 'The generate SBOM overview information'
properties:
start_time:
type: string
format: date-time
description: 'The start time of the generating sbom report task'
example: '2006-01-02T14:04:05Z'
end_time:
type: string
format: date-time
description: 'The end time of the generating sbom report task'
example: '2006-01-02T15:04:05Z'
scan_status:
type: string
description: 'The status of the generating SBOM task'
sbom_digest:
type: string
description: 'The digest of the generated SBOM accessory'
report_id:
type: string
description: 'id of the native scan report'
example: '5f62c830-f996-11e9-957f-0242c0a89008'
duration:
type: integer
format: int64
description: 'Time in seconds required to create the report'
example: 300
scanner:
$ref: '#/definitions/Scanner'
NativeReportSummary:
type: object
description: 'The summary for the native report'

@ -7165,6 +7239,10 @@ definitions:
|
|||
type: string
|
||||
description: 'Whether scan images automatically when pushing. The valid values are "true", "false".'
|
||||
x-nullable: true
|
||||
auto_sbom_generation:
|
||||
type: string
|
||||
description: 'Whether generating SBOM automatically when pushing a subject artifact. The valid values are "true", "false".'
|
||||
x-nullable: true
|
||||
reuse_sys_cve_allowlist:
|
||||
type: string
|
||||
description: 'Whether this project reuse the system level CVE allowlist as the allowlist of its own. The valid values are "true", "false".
|
||||
|
@ -7657,8 +7735,9 @@ definitions:
|
|||
description: The level of the robot, project or system
|
||||
duration:
|
||||
type: integer
|
||||
x-nullable: true
|
||||
format: int64
|
||||
description: The duration of the robot in days
|
||||
description: The duration of the robot in days, duration must be either -1(Never) or a positive integer
|
||||
editable:
|
||||
type: boolean
|
||||
x-omitempty: false
|
||||
|
@ -7705,7 +7784,7 @@ definitions:
|
|||
duration:
|
||||
type: integer
|
||||
format: int64
|
||||
description: The duration of the robot in days
|
||||
description: The duration of the robot in days, duration must be either -1(Never) or a positive integer
|
||||
permissions:
|
||||
type: array
|
||||
items:
|
||||
|
@ -7757,7 +7836,7 @@ definitions:
|
|||
properties:
|
||||
resource:
|
||||
type: string
|
||||
description: The resource of the access. Possible resources are *, artifact, artifact-addition, artifact-label, audit-log, catalog, configuration, distribution, garbage-collection, helm-chart, helm-chart-version, helm-chart-version-label, immutable-tag, label, ldap-user, log, member, metadata, notification-policy, preheat-instance, preheat-policy, project, quota, registry, replication, replication-adapter, replication-policy, repository, robot, scan, scan-all, scanner, system-volumes, tag, tag-retention, user, user-group or "" (for self-reference).
|
||||
description: The resource of the access. Possible resources are listed here for system and project level https://github.com/goharbor/harbor/blob/main/src/common/rbac/const.go
|
||||
action:
|
||||
type: string
|
||||
description: The action of the access. Possible actions are *, pull, push, create, read, update, delete, list, operate, scanner-pull and stop.
|
||||
|
@ -7995,7 +8074,7 @@ definitions:
|
|||
type: string
|
||||
description: |
|
||||
The schedule type. The valid values are 'Hourly', 'Daily', 'Weekly', 'Custom', 'Manual', 'None' and 'Schedule'.
|
||||
'Manual' means to trigger it right away, 'Schedule' means to trigger it by a specified cron schedule and
|
||||
'Manual' means to trigger it right away, 'Schedule' means to trigger it by a specified cron schedule and
|
||||
'None' means to cancel the schedule.
|
||||
enum:
|
||||
- Hourly
|
||||
|
@ -8364,6 +8443,11 @@ definitions:
|
|||
default: ""
|
||||
description: Indicate the healthy of the registration
|
||||
example: "healthy"
|
||||
capabilities:
|
||||
type: object
|
||||
description: Indicates the capabilities of the scanner, e.g. support_vulnerability or support_sbom.
|
||||
additionalProperties: True
|
||||
example: {"support_vulnerability": true, "support_sbom": true}
|
||||
|
||||
ScannerRegistrationReq:
|
||||
type: object
|
||||
|
@ -8446,6 +8530,12 @@ definitions:
|
|||
ScannerCapability:
|
||||
type: object
|
||||
properties:
|
||||
type:
|
||||
type: string
|
||||
description: |
|
||||
Specify the type of scanner capability, like vulnerability or sbom
|
||||
x-omitempty: false
|
||||
example: "sbom"
|
||||
consumes_mime_types:
|
||||
type: array
|
||||
items:
|
||||
|
@ -9814,12 +9904,12 @@ definitions:
|
|||
type: object
|
||||
description: the dangerous CVE information
|
||||
properties:
|
||||
cve_id:
|
||||
cve_id:
|
||||
type: string
|
||||
description: the cve id
|
||||
severity:
|
||||
type: string
|
||||
description: the severity of the CVE
|
||||
description: the severity of the CVE
|
||||
cvss_score_v3:
|
||||
type: number
|
||||
format: float64
|
||||
|
@ -9829,7 +9919,7 @@ definitions:
|
|||
description: the description of the CVE
|
||||
package:
|
||||
type: string
|
||||
description: the package of the CVE
|
||||
description: the package of the CVE
|
||||
version:
|
||||
type: string
|
||||
description: the version of the package
|
||||
|
@ -9837,14 +9927,14 @@ definitions:
|
|||
type: object
|
||||
description: the dangerous artifact information
|
||||
properties:
|
||||
project_id:
|
||||
project_id:
|
||||
type: integer
|
||||
format: int64
|
||||
description: the project id of the artifact
|
||||
repository_name:
|
||||
type: string
|
||||
description: the repository name of the artifact
|
||||
digest:
|
||||
digest:
|
||||
type: string
|
||||
description: the digest of the artifact
|
||||
critical_cnt:
|
||||
|
@@ -9904,6 +9994,13 @@ definitions:
description: The description of the vulnerability
links:
type: array
items:
items:
type: string
description: Links of the vulnerability
ScanType:
type: object
properties:
scan_type:
type: string
description: 'The scan type for the scan request. Two options are currently supported, vulnerability and sbom'
enum: [ vulnerability, sbom ]

(A binary image file was added in this change; 118 KiB, not shown.)

@@ -1,5 +1,5 @@
#!/bin/bash
#docker version: 17.06.0+
#docker version: 20.10.10+
#docker-compose version: 1.18.0+
#golang version: 1.12.0+

@@ -78,7 +78,7 @@ function check_golang {
function check_docker {
if ! docker --version &> /dev/null
then
error "Need to install docker(17.06.0+) first and run this script again."
error "Need to install docker(20.10.10+) first and run this script again."
exit 1
fi

@@ -93,7 +93,7 @@ function check_docker {
# the version of docker does not meet the requirement
if [ "$docker_version_part1" -lt 17 ] || ([ "$docker_version_part1" -eq 17 ] && [ "$docker_version_part2" -lt 6 ])
then
error "Need to upgrade docker package to 17.06.0+."
error "Need to upgrade docker package to 20.10.10+."
exit 1
fi
else

@ -16,6 +16,18 @@ https:
|
|||
# The path of cert and key files for nginx
|
||||
certificate: /your/certificate/path
|
||||
private_key: /your/private/key/path
|
||||
# enable strong ssl ciphers (default: false)
|
||||
# strong_ssl_ciphers: false
|
||||
|
||||
# # Harbor will set ipv4 enabled only by default if this block is not configured
|
||||
# # Otherwise, please uncomment this block to configure your own ip_family stacks
|
||||
# ip_family:
|
||||
# # ipv6Enabled set to true if ipv6 is enabled in docker network, currently it affected the nginx related component
|
||||
# ipv6:
|
||||
# enabled: false
|
||||
# # ipv4Enabled set to true by default, currently it affected the nginx related component
|
||||
# ipv4:
|
||||
# enabled: true
|
||||
|
||||
# # Uncomment following will enable tls communication between all harbor components
|
||||
# internal_tls:
|
||||
|
@ -23,8 +35,7 @@ https:
|
|||
# enabled: true
|
||||
# # put your cert and key files on dir
|
||||
# dir: /etc/harbor/tls/internal
|
||||
# # enable strong ssl ciphers (default: false)
|
||||
# strong_ssl_ciphers: false
|
||||
|
||||
|
||||
# Uncomment external_url if you want to enable external proxy
|
||||
# And when it enabled the hostname will no longer used
|
||||
|
@ -62,7 +73,8 @@ data_volume: /data
|
|||
# ca_bundle:
|
||||
|
||||
# # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss
|
||||
# # for more info about this configuration please refer https://docs.docker.com/registry/configuration/
|
||||
# # for more info about this configuration please refer https://distribution.github.io/distribution/about/configuration/
|
||||
# # and https://distribution.github.io/distribution/storage-drivers/
|
||||
# filesystem:
|
||||
# maxthreads: 100
|
||||
# # set disable to true when you want to disable registry redirect
|
||||
|
@ -86,6 +98,10 @@ trivy:
|
|||
# `metadata.json` files and mount them in the `/home/scanner/.cache/trivy/db` path.
|
||||
skip_update: false
|
||||
#
|
||||
# skipJavaDBUpdate If the flag is enabled you have to manually download the `trivy-java.db` file and mount it in the
|
||||
# `/home/scanner/.cache/trivy/java-db/trivy-java.db` path
|
||||
skip_java_db_update: false
|
||||
#
|
||||
# The offline_scan option prevents Trivy from sending API requests to identify dependencies.
|
||||
# Scanning JAR files and pom.xml may require Internet access for better detection, but this option tries to avoid it.
|
||||
# For example, the offline mode will not try to resolve transitive dependencies in pom.xml when the dependency doesn't
|
||||
|
@ -99,6 +115,11 @@ trivy:
|
|||
#
|
||||
# insecure The flag to skip verifying registry certificate
|
||||
insecure: false
|
||||
#
|
||||
# timeout The duration to wait for scan completion.
|
||||
# There is upper bound of 30 minutes defined in scan job. So if this `timeout` is larger than 30m0s, it will also timeout at 30m0s.
|
||||
timeout: 5m0s
|
||||
#
|
||||
# github_token The GitHub access token to download Trivy DB
|
||||
#
|
||||
# Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
|
||||
|
@ -153,7 +174,7 @@ log:
|
|||
# port: 5140
|
||||
|
||||
#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
|
||||
_version: 2.10.0
|
||||
_version: 2.11.0
|
||||
|
||||
# Uncomment external_database if using external database.
|
||||
# external_database:
|
||||
|
@ -167,17 +188,6 @@ _version: 2.10.0
|
|||
# max_idle_conns: 2
|
||||
# max_open_conns: 0
|
||||
|
||||
# Uncomment redis if need to customize redis db
|
||||
# redis:
|
||||
# # db_index 0 is for core, it's unchangeable
|
||||
# # registry_db_index: 1
|
||||
# # jobservice_db_index: 2
|
||||
# # trivy_db_index: 5
|
||||
# # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
|
||||
# # harbor_db_index: 6
|
||||
# # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
|
||||
# # cache_db_index: 7
|
||||
|
||||
# Uncomment redis if need to customize redis db
|
||||
# redis:
|
||||
# # db_index 0 is for core, it's unchangeable
|
||||
|
@ -248,7 +258,7 @@ proxy:
|
|||
# enabled: true
|
||||
# # set sample_rate to 1 if you wanna sampling 100% of trace data; set 0.5 if you wanna sampling 50% of trace data, and so forth
|
||||
# sample_rate: 1
|
||||
# # # namespace used to differenciate different harbor services
|
||||
# # # namespace used to differentiate different harbor services
|
||||
# # namespace:
|
||||
# # # attributes is a key value dict contains user defined attributes used to initialize trace provider
|
||||
# # attributes:
|
||||
|
@ -301,6 +311,6 @@ cache:
|
|||
# # can improve the performance of high concurrent pushing to the same project,
|
||||
# # and reduce the database connections spike and occupies.
|
||||
# # By redis will bring up some delay for quota usage updation for display, so only
|
||||
# # suggest switch provider to redis if you were ran into the db connections spike aroud
|
||||
# # the scenario of high concurrent pushing to same project, no improvment for other scenes.
|
||||
# quota_update_provider: redis # Or db
|
||||
# # suggest switch provider to redis if you were ran into the db connections spike around
|
||||
# # the scenario of high concurrent pushing to same project, no improvement for other scenes.
|
||||
# quota_update_provider: redis # Or db
|
||||
|
|
|
@@ -0,0 +1,31 @@
/*
table artifact:
id SERIAL PRIMARY KEY NOT NULL,
type varchar(255) NOT NULL,
media_type varchar(255) NOT NULL,
manifest_media_type varchar(255) NOT NULL,
artifact_type varchar(255) NOT NULL,
project_id int NOT NULL,
repository_id int NOT NULL,
repository_name varchar(255) NOT NULL,
digest varchar(255) NOT NULL,
size bigint,
push_time timestamp default CURRENT_TIMESTAMP,
pull_time timestamp,
extra_attrs text,
annotations jsonb,
CONSTRAINT unique_artifact UNIQUE (repository_id, digest)
*/

/*
Add new column artifact_type for artifact table to work with oci-spec v1.1.0 list referrer api
*/
ALTER TABLE artifact ADD COLUMN IF NOT EXISTS artifact_type varchar(255);

/*
set value for artifact_type
then set column artifact_type as not null
*/
UPDATE artifact SET artifact_type = media_type WHERE artifact_type IS NULL;

ALTER TABLE artifact ALTER COLUMN artifact_type SET NOT NULL;

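A small sanity check one might run after this migration (a sketch; it assumes the standard `psql` client and Harbor's default `registry` database):

```bash
# confirm the new column exists and is NOT NULL
psql -U postgres -d registry -c \
  "SELECT column_name, is_nullable FROM information_schema.columns
   WHERE table_name = 'artifact' AND column_name = 'artifact_type';"
```
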
@@ -12,7 +12,7 @@ COPY ./make/photon/db/initial-registry.sql /docker-entrypoint-initdb.d/
RUN chown -R postgres:postgres /docker-entrypoint.sh /docker-healthcheck.sh /docker-entrypoint-initdb.d \
&& chmod u+x /docker-entrypoint.sh /docker-healthcheck.sh

ENTRYPOINT ["/docker-entrypoint.sh", "13", "14"]
ENTRYPOINT ["/docker-entrypoint.sh", "14", "15"]
HEALTHCHECK CMD ["/docker-healthcheck.sh"]

USER postgres

@@ -6,15 +6,15 @@ RUN tdnf install -y shadow >> /dev/null \
&& groupadd -r postgres --gid=999 \
&& useradd -m -r -g postgres --uid=999 postgres

RUN tdnf install -y postgresql13-server >> /dev/null
RUN tdnf install -y gzip postgresql14-server findutils bc >> /dev/null \
RUN tdnf install -y postgresql14-server >> /dev/null
RUN tdnf install -y gzip postgresql15-server findutils bc >> /dev/null \
&& mkdir -p /docker-entrypoint-initdb.d \
&& mkdir -p /run/postgresql \
&& chown -R postgres:postgres /run/postgresql \
&& chmod 2777 /run/postgresql \
&& mkdir -p "$PGDATA" && chown -R postgres:postgres "$PGDATA" && chmod 777 "$PGDATA" \
&& sed -i "s|#listen_addresses = 'localhost'.*|listen_addresses = '*'|g" /usr/pgsql/14/share/postgresql/postgresql.conf.sample \
&& sed -i "s|#unix_socket_directories = '/tmp'.*|unix_socket_directories = '/run/postgresql'|g" /usr/pgsql/14/share/postgresql/postgresql.conf.sample \
&& sed -i "s|#listen_addresses = 'localhost'.*|listen_addresses = '*'|g" /usr/pgsql/15/share/postgresql/postgresql.conf.sample \
&& sed -i "s|#unix_socket_directories = '/tmp'.*|unix_socket_directories = '/run/postgresql'|g" /usr/pgsql/15/share/postgresql/postgresql.conf.sample \
&& tdnf clean all

RUN tdnf erase -y toybox && tdnf install -y util-linux net-tools

@@ -10,7 +10,7 @@ from migrations import accept_versions
@click.command()
@click.option('-i', '--input', 'input_', required=True, help="The path of original config file")
@click.option('-o', '--output', default='', help="the path of output config file")
@click.option('-t', '--target', default='2.10.0', help="target version of input path")
@click.option('-t', '--target', default='2.11.0', help="target version of input path")
def migrate(input_, output, target):
"""
migrate command will migrate config file style to specific version

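An illustrative invocation of this click command (hedged: the wrapper script name below is an assumption, and in practice the migrator normally runs inside the prepare container):

```bash
# rewrite an existing harbor.yml into the 2.11.0 layout (entry script name is a placeholder)
python main.py migrate -i /host/harbor.yml -o /host/harbor.yml.new -t 2.11.0
```
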
@@ -2,4 +2,4 @@ import os

MIGRATION_BASE_DIR = os.path.dirname(__file__)

accept_versions = {'1.9.0', '1.10.0', '2.0.0', '2.1.0', '2.2.0', '2.3.0', '2.4.0', '2.5.0', '2.6.0', '2.7.0', '2.8.0', '2.9.0','2.10.0'}
accept_versions = {'1.9.0', '1.10.0', '2.0.0', '2.1.0', '2.2.0', '2.3.0', '2.4.0', '2.5.0', '2.6.0', '2.7.0', '2.8.0', '2.9.0','2.10.0', '2.11.0'}

@ -23,6 +23,12 @@ https:
|
|||
# The path of cert and key files for nginx
|
||||
certificate: {{ https.certificate }}
|
||||
private_key: {{ https.private_key }}
|
||||
# enable strong ssl ciphers (default: false)
|
||||
{% if strong_ssl_ciphers is defined %}
|
||||
strong_ssl_ciphers: {{ strong_ssl_ciphers | lower }}
|
||||
{% else %}
|
||||
strong_ssl_ciphers: false
|
||||
{% endif %}
|
||||
{% else %}
|
||||
# https related config
|
||||
# https:
|
||||
|
@ -31,6 +37,8 @@ https:
|
|||
# # The path of cert and key files for nginx
|
||||
# certificate: /your/certificate/path
|
||||
# private_key: /your/private/key/path
|
||||
# enable strong ssl ciphers (default: false)
|
||||
# strong_ssl_ciphers: false
|
||||
{% endif %}
|
||||
|
||||
{% if internal_tls is defined %}
|
||||
|
@ -38,13 +46,9 @@ https:
|
|||
internal_tls:
|
||||
# set enabled to true means internal tls is enabled
|
||||
enabled: {{ internal_tls.enabled | lower }}
|
||||
{% if internal_tls.dir is defined %}
|
||||
# put your cert and key files on dir
|
||||
dir: {{ internal_tls.dir }}
|
||||
# enable strong ssl ciphers (default: false)
|
||||
{% if internal_tls.strong_ssl_ciphers is defined %}
|
||||
strong_ssl_ciphers: {{ internal_tls.strong_ssl_ciphers | lower }}
|
||||
{% else %}
|
||||
strong_ssl_ciphers: false
|
||||
{% endif %}
|
||||
{% else %}
|
||||
# internal_tls:
|
||||
|
@ -52,8 +56,6 @@ internal_tls:
|
|||
# enabled: true
|
||||
# # put your cert and key files on dir
|
||||
# dir: /etc/harbor/tls/internal
|
||||
# # enable strong ssl ciphers (default: false)
|
||||
# strong_ssl_ciphers: false
|
||||
{% endif %}
|
||||
|
||||
# Uncomment external_url if you want to enable external proxy
|
||||
|
|
|
@@ -0,0 +1,21 @@
import os
from jinja2 import Environment, FileSystemLoader, StrictUndefined, select_autoescape
from utils.migration import read_conf

revision = '2.11.0'
down_revisions = ['2.10.0']

def migrate(input_cfg, output_cfg):
    current_dir = os.path.dirname(__file__)
    tpl = Environment(
        loader=FileSystemLoader(current_dir),
        undefined=StrictUndefined,
        trim_blocks=True,
        lstrip_blocks=True,
        autoescape = select_autoescape()
    ).get_template('harbor.yml.jinja')

    config_dict = read_conf(input_cfg)

    with open(output_cfg, 'w') as f:
        f.write(tpl.render(**config_dict))

@ -0,0 +1,737 @@
|
|||
# Configuration file of Harbor
|
||||
|
||||
# The IP address or hostname to access admin UI and registry service.
|
||||
# DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
|
||||
hostname: {{ hostname }}
|
||||
|
||||
# http related config
|
||||
{% if http is defined %}
|
||||
http:
|
||||
# port for http, default is 80. If https enabled, this port will redirect to https port
|
||||
port: {{ http.port }}
|
||||
{% else %}
|
||||
# http:
|
||||
# # port for http, default is 80. If https enabled, this port will redirect to https port
|
||||
# port: 80
|
||||
{% endif %}
|
||||
|
||||
{% if https is defined %}
|
||||
# https related config
|
||||
https:
|
||||
# https port for harbor, default is 443
|
||||
port: {{ https.port }}
|
||||
# The path of cert and key files for nginx
|
||||
certificate: {{ https.certificate }}
|
||||
private_key: {{ https.private_key }}
|
||||
# enable strong ssl ciphers (default: false)
|
||||
{% if strong_ssl_ciphers is defined %}
|
||||
strong_ssl_ciphers: {{ strong_ssl_ciphers | lower }}
|
||||
{% else %}
|
||||
strong_ssl_ciphers: false
|
||||
{% endif %}
|
||||
{% else %}
|
||||
# https related config
|
||||
# https:
|
||||
# # https port for harbor, default is 443
|
||||
# port: 443
|
||||
# # The path of cert and key files for nginx
|
||||
# certificate: /your/certificate/path
|
||||
# private_key: /your/private/key/path
|
||||
# enable strong ssl ciphers (default: false)
|
||||
# strong_ssl_ciphers: false
|
||||
{% endif %}
|
||||
|
||||
# # Harbor will set ipv4 enabled only by default if this block is not configured
|
||||
# # Otherwise, please uncomment this block to configure your own ip_family stacks
|
||||
{% if ip_family is defined %}
|
||||
ip_family:
|
||||
# ipv6Enabled set to true if ipv6 is enabled in docker network, currently it affected the nginx related component
|
||||
{% if ip_family.ipv6 is defined %}
|
||||
ipv6:
|
||||
enabled: {{ ip_family.ipv6.enabled | lower }}
|
||||
{% else %}
|
||||
ipv6:
|
||||
enabled: false
|
||||
{% endif %}
|
||||
# ipv4Enabled set to true by default, currently it affected the nginx related component
|
||||
{% if ip_family.ipv4 is defined %}
|
||||
ipv4:
|
||||
enabled: {{ ip_family.ipv4.enabled | lower }}
|
||||
{% else %}
|
||||
ipv4:
|
||||
enabled: true
|
||||
{% endif %}
|
||||
{% else %}
|
||||
# ip_family:
|
||||
# # ipv6Enabled set to true if ipv6 is enabled in docker network, currently it affected the nginx related component
|
||||
# ipv6:
|
||||
# enabled: false
|
||||
# # ipv4Enabled set to true by default, currently it affected the nginx related component
|
||||
# ipv4:
|
||||
# enabled: true
|
||||
{% endif %}
|
||||
|
||||
{% if internal_tls is defined %}
|
||||
# Uncomment following will enable tls communication between all harbor components
|
||||
internal_tls:
|
||||
# set enabled to true means internal tls is enabled
|
||||
enabled: {{ internal_tls.enabled | lower }}
|
||||
{% if internal_tls.dir is defined %}
|
||||
# put your cert and key files on dir
|
||||
dir: {{ internal_tls.dir }}
|
||||
{% endif %}
|
||||
{% else %}
|
||||
# internal_tls:
|
||||
# # set enabled to true means internal tls is enabled
|
||||
# enabled: true
|
||||
# # put your cert and key files on dir
|
||||
# dir: /etc/harbor/tls/internal
|
||||
{% endif %}
|
||||
|
||||
# Uncomment external_url if you want to enable external proxy
|
||||
# And when it enabled the hostname will no longer used
|
||||
{% if external_url is defined %}
|
||||
external_url: {{ external_url }}
|
||||
{% else %}
|
||||
# external_url: https://reg.mydomain.com:8433
|
||||
{% endif %}
|
||||
|
||||
# The initial password of Harbor admin
|
||||
# It only works in first time to install harbor
|
||||
# Remember Change the admin password from UI after launching Harbor.
|
||||
{% if harbor_admin_password is defined %}
|
||||
harbor_admin_password: {{ harbor_admin_password }}
|
||||
{% else %}
|
||||
harbor_admin_password: Harbor12345
|
||||
{% endif %}
|
||||
|
||||
# Harbor DB configuration
|
||||
database:
|
||||
{% if database is defined %}
|
||||
# The password for the root user of Harbor DB. Change this before any production use.
|
||||
password: {{ database.password}}
|
||||
# The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
|
||||
max_idle_conns: {{ database.max_idle_conns }}
|
||||
# The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
|
||||
# Note: the default number of connections is 1024 for postgres of harbor.
|
||||
max_open_conns: {{ database.max_open_conns }}
|
||||
# The maximum amount of time a connection may be reused. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's age.
|
||||
# The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
||||
{% if database.conn_max_lifetime is defined %}
|
||||
conn_max_lifetime: {{ database.conn_max_lifetime }}
|
||||
{% else %}
|
||||
conn_max_lifetime: 5m
|
||||
{% endif %}
|
||||
# The maximum amount of time a connection may be idle. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's idle time.
|
||||
# The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
||||
{% if database.conn_max_idle_time is defined %}
|
||||
conn_max_idle_time: {{ database.conn_max_idle_time }}
|
||||
{% else %}
|
||||
conn_max_idle_time: 0
|
||||
{% endif %}
|
||||
{% else %}
|
||||
# The password for the root user of Harbor DB. Change this before any production use.
|
||||
password: root123
|
||||
# The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
|
||||
max_idle_conns: 100
|
||||
# The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
|
||||
# Note: the default number of connections is 1024 for postgres of harbor.
|
||||
max_open_conns: 900
|
||||
# The maximum amount of time a connection may be reused. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's age.
|
||||
# The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
||||
conn_max_lifetime: 5m
|
||||
# The maximum amount of time a connection may be idle. Expired connections may be closed lazily before reuse. If it <= 0, connections are not closed due to a connection's idle time.
|
||||
# The value is a duration string. A duration string is a possibly signed sequence of decimal numbers, each with optional fraction and a unit suffix, such as "300ms", "-1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
|
||||
conn_max_idle_time: 0
|
||||
{% endif %}
|
||||
|
||||
{% if data_volume is defined %}
|
||||
# The default data volume
|
||||
data_volume: {{ data_volume }}
|
||||
{% else %}
|
||||
# The default data volume
|
||||
data_volume: /data
|
||||
{% endif %}
|
||||
|
||||
# Harbor Storage settings by default is using /data dir on local filesystem
|
||||
# Uncomment storage_service setting If you want to using external storage
|
||||
{% if storage_service is defined %}
|
||||
storage_service:
|
||||
{% for key, value in storage_service.items() %}
|
||||
{% if key == 'ca_bundle' %}
|
||||
# # ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
|
||||
# # of registry's and chart repository's containers. This is usually needed when the user hosts a internal storage with self signed certificate.
|
||||
ca_bundle: {{ value if value is not none else '' }}
|
||||
{% elif key == 'redirect' %}
|
||||
# # set disable to true when you want to disable registry redirect
|
||||
redirect:
|
||||
{% if storage_service.redirect.disabled is defined %}
|
||||
disable: {{ storage_service.redirect.disabled | lower}}
|
||||
{% else %}
|
||||
disable: {{ storage_service.redirect.disable | lower}}
|
||||
{% endif %}
|
||||
{% else %}
|
||||
# # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss
|
||||
# # for more info about this configuration please refer to https://distribution.github.io/distribution/about/configuration/
|
||||
# # and https://distribution.github.io/distribution/storage-drivers/
|
||||
{{ key }}:
|
||||
{% for k, v in value.items() %}
|
||||
{{ k }}: {{ v if v is not none else '' }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{% else %}
|
||||
# storage_service:
|
||||
# # ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
|
||||
# # of the registry's and chart repository's containers. This is usually needed when the user hosts internal storage with a self-signed certificate.
|
||||
# ca_bundle:
|
||||
|
||||
# # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss
|
||||
# # for more info about this configuration please refer to https://distribution.github.io/distribution/about/configuration/
|
||||
# # and https://distribution.github.io/distribution/storage-drivers/
|
||||
# filesystem:
|
||||
# maxthreads: 100
|
||||
# # set disable to true when you want to disable registry redirect
|
||||
# redirect:
|
||||
# disable: false
|
||||
{% endif %}
|
||||
|
||||
# Trivy configuration
|
||||
#
|
||||
# Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases.
|
||||
# It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached
|
||||
# in the local file system. In addition, the database contains the update timestamp so Trivy can detect whether it
|
||||
# should download a newer version from the Internet or use the cached one. Currently, the database is updated every
|
||||
# 12 hours and published as a new release to GitHub.
|
||||
{% if trivy is defined %}
|
||||
trivy:
|
||||
# ignoreUnfixed The flag to display only fixed vulnerabilities
|
||||
{% if trivy.ignore_unfixed is defined %}
|
||||
ignore_unfixed: {{ trivy.ignore_unfixed | lower }}
|
||||
{% else %}
|
||||
ignore_unfixed: false
|
||||
{% endif %}
|
||||
# skipUpdate The flag to enable or disable Trivy DB downloads from GitHub
|
||||
#
|
||||
# You might want to enable this flag in test or CI/CD environments to avoid GitHub rate limiting issues.
|
||||
# If the flag is enabled you have to download the `trivy-offline.tar.gz` archive manually, extract `trivy.db` and
|
||||
# `metadata.json` files and mount them in the `/home/scanner/.cache/trivy/db` path.
|
||||
{% if trivy.skip_update is defined %}
|
||||
skip_update: {{ trivy.skip_update | lower }}
|
||||
{% else %}
|
||||
skip_update: false
|
||||
{% endif %}
|
||||
{% if trivy.skip_java_db_update is defined %}
|
||||
# skipJavaDBUpdate If the flag is enabled you have to manually download the `trivy-java.db` file and mount it in the
|
||||
# `/home/scanner/.cache/trivy/java-db/trivy-java.db` path
|
||||
skip_java_db_update: {{ trivy.skip_java_db_update | lower }}
|
||||
{% else %}
|
||||
skip_java_db_update: false
|
||||
{% endif %}
|
||||
#
|
||||
{% if trivy.offline_scan is defined %}
|
||||
offline_scan: {{ trivy.offline_scan | lower }}
|
||||
{% else %}
|
||||
offline_scan: false
|
||||
{% endif %}
|
||||
#
|
||||
# Comma-separated list of what security issues to detect. Possible values are `vuln`, `config` and `secret`. Defaults to `vuln`.
|
||||
{% if trivy.security_check is defined %}
|
||||
security_check: {{ trivy.security_check }}
|
||||
{% else %}
|
||||
security_check: vuln
|
||||
{% endif %}
|
||||
#
|
||||
# insecure The flag to skip verifying registry certificate
|
||||
{% if trivy.insecure is defined %}
|
||||
insecure: {{ trivy.insecure | lower }}
|
||||
{% else %}
|
||||
insecure: false
|
||||
{% endif %}
|
||||
#
|
||||
{% if trivy.timeout is defined %}
|
||||
# timeout The duration to wait for scan completion.
|
||||
# There is an upper bound of 30 minutes defined in the scan job, so if this `timeout` is larger than 30m0s, it will still time out at 30m0s.
|
||||
timeout: {{ trivy.timeout}}
|
||||
{% else %}
|
||||
timeout: 5m0s
|
||||
{% endif %}
|
||||
#
|
||||
# github_token The GitHub access token to download Trivy DB
|
||||
#
|
||||
# Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
|
||||
# for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
|
||||
# requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
|
||||
# https://developer.github.com/v3/#rate-limiting
|
||||
#
|
||||
# You can create a GitHub token by following the instructions in
|
||||
# https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
|
||||
#
|
||||
{% if trivy.github_token is defined %}
|
||||
github_token: {{ trivy.github_token }}
|
||||
{% else %}
|
||||
# github_token: xxx
|
||||
{% endif %}
|
||||
{% else %}
|
||||
# trivy:
|
||||
# # ignoreUnfixed The flag to display only fixed vulnerabilities
|
||||
# ignore_unfixed: false
|
||||
# # skipUpdate The flag to enable or disable Trivy DB downloads from GitHub
|
||||
# #
|
||||
# # You might want to enable this flag in test or CI/CD environments to avoid GitHub rate limiting issues.
|
||||
# # If the flag is enabled you have to download the `trivy-offline.tar.gz` archive manually, extract `trivy.db` and
|
||||
# # `metadata.json` files and mount them in the `/home/scanner/.cache/trivy/db` path.
|
||||
# skip_update: false
|
||||
# #
|
||||
# # skipJavaDBUpdate If the flag is enabled you have to manually download the `trivy-java.db` file and mount it in the
|
||||
# # `/home/scanner/.cache/trivy/java-db/trivy-java.db` path
|
||||
# skip_java_db_update: false
|
||||
# #
|
||||
# # The offline_scan option prevents Trivy from sending API requests to identify dependencies.
|
||||
# # Scanning JAR files and pom.xml may require Internet access for better detection, but this option tries to avoid it.
|
||||
# # For example, the offline mode will not try to resolve transitive dependencies in pom.xml when the dependency doesn't
|
||||
# # exist in the local repositories. It means the number of detected vulnerabilities might be lower in offline mode.
|
||||
# # It works as expected if all the dependencies are available locally.
|
||||
# # This option doesn’t affect DB download. You need to specify "skip-update" as well as "offline-scan" in an air-gapped environment.
|
||||
# offline_scan: false
|
||||
# #
|
||||
# # insecure The flag to skip verifying registry certificate
|
||||
# insecure: false
|
||||
# # github_token The GitHub access token to download Trivy DB
|
||||
# #
|
||||
# # Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
|
||||
# # for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
|
||||
# # requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
|
||||
# # https://developer.github.com/v3/#rate-limiting
|
||||
# #
|
||||
# # timeout The duration to wait for scan completion.
|
||||
# # There is an upper bound of 30 minutes defined in the scan job, so if this `timeout` is larger than 30m0s, it will still time out at 30m0s.
|
||||
# timeout: 5m0s
|
||||
# #
|
||||
# # You can create a GitHub token by following the instructions in
|
||||
# # https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
|
||||
# #
|
||||
# # github_token: xxx
|
||||
{% endif %}
|
||||
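The scan-job upper bound mentioned for `timeout` can be read as a clamp: whatever is configured, the effective wait never exceeds 30 minutes. A hedged Go sketch of that behavior; clampTimeout is an illustrative helper, not Harbor's actual scan-job code:

    package main

    import (
        "fmt"
        "time"
    )

    // clampTimeout illustrates the documented rule: a configured timeout larger
    // than 30m0s (or a non-positive one) still results in a 30m0s effective timeout.
    func clampTimeout(configured time.Duration) time.Duration {
        const upperBound = 30 * time.Minute
        if configured <= 0 || configured > upperBound {
            return upperBound
        }
        return configured
    }

    func main() {
        for _, v := range []string{"5m0s", "45m"} {
            d, _ := time.ParseDuration(v)
            fmt.Printf("configured %v -> effective %v\n", d, clampTimeout(d))
        }
    }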
|
||||
jobservice:
|
||||
# Maximum number of job workers in job service
|
||||
{% if jobservice is defined %}
|
||||
max_job_workers: {{ jobservice.max_job_workers }}
|
||||
# The jobLoggers backend names; only "STD_OUTPUT", "FILE" and/or "DB" are supported
|
||||
{% if jobservice.job_loggers is defined %}
|
||||
job_loggers:
|
||||
{% for job_logger in jobservice.job_loggers %}
|
||||
- {{job_logger}}
|
||||
{% endfor %}
|
||||
{% else %}
|
||||
job_loggers:
|
||||
- STD_OUTPUT
|
||||
- FILE
|
||||
# - DB
|
||||
{% endif %}
|
||||
# The jobLogger sweeper duration (ignored if `jobLogger` is `stdout`)
|
||||
{% if jobservice.logger_sweeper_duration is defined %}
|
||||
logger_sweeper_duration: {{ jobservice.logger_sweeper_duration }}
|
||||
{% else %}
|
||||
logger_sweeper_duration: 1
|
||||
{% endif %}
|
||||
{% else %}
|
||||
max_job_workers: 10
|
||||
# The jobLoggers backend names; only "STD_OUTPUT", "FILE" and/or "DB" are supported
|
||||
job_loggers:
|
||||
- STD_OUTPUT
|
||||
- FILE
|
||||
# - DB
|
||||
# The jobLogger sweeper duration (ignored if `jobLogger` is `stdout`)
|
||||
logger_sweeper_duration: 1
|
||||
{% endif %}
|
||||
|
||||
notification:
|
||||
# Maximum retry count for webhook job
|
||||
{% if notification is defined %}
|
||||
webhook_job_max_retry: {{ notification.webhook_job_max_retry}}
|
||||
# HTTP client timeout for webhook job
|
||||
{% if notification.webhook_job_http_client_timeout is defined %}
|
||||
webhook_job_http_client_timeout: {{ notification.webhook_job_http_client_timeout }}
|
||||
{% else %}
|
||||
webhook_job_http_client_timeout: 3 #seconds
|
||||
{% endif %}
|
||||
{% else %}
|
||||
webhook_job_max_retry: 3
|
||||
# HTTP client timeout for webhook job
|
||||
webhook_job_http_client_timeout: 3 #seconds
|
||||
{% endif %}
|
||||
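webhook_job_max_retry and webhook_job_http_client_timeout (in seconds) together bound how long a webhook delivery is attempted. A minimal sketch of a retrying delivery loop driven by those two values; deliver is a hypothetical helper for illustration, not Harbor's jobservice implementation:

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    // deliver posts a payload and retries up to maxRetry times, each attempt
    // bounded by the HTTP client timeout (webhook_job_http_client_timeout).
    func deliver(url string, maxRetry int, timeout time.Duration) error {
        client := &http.Client{Timeout: timeout}
        var lastErr error
        for attempt := 0; attempt <= maxRetry; attempt++ {
            resp, err := client.Post(url, "application/json", nil)
            if err != nil {
                lastErr = err
                continue
            }
            resp.Body.Close()
            if resp.StatusCode < 300 {
                return nil
            }
            lastErr = fmt.Errorf("unexpected status %d", resp.StatusCode)
        }
        return lastErr
    }

    func main() {
        // Defaults from the section above: 3 retries, 3-second client timeout.
        if err := deliver("http://example.invalid/hook", 3, 3*time.Second); err != nil {
            fmt.Println("delivery failed:", err)
        }
    }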
|
||||
# Log configurations
|
||||
log:
|
||||
# options are debug, info, warning, error, fatal
|
||||
{% if log is defined %}
|
||||
level: {{ log.level }}
|
||||
# configs for logs in local storage
|
||||
local:
|
||||
# Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
|
||||
rotate_count: {{ log.local.rotate_count }}
|
||||
# Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
|
||||
# If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
|
||||
# are all valid.
|
||||
rotate_size: {{ log.local.rotate_size }}
|
||||
# The directory on your host that stores the logs
|
||||
location: {{ log.local.location }}
|
||||
{% if log.external_endpoint is defined %}
|
||||
external_endpoint:
|
||||
# protocol used to transmit logs to the external endpoint, options are tcp or udp
|
||||
protocol: {{ log.external_endpoint.protocol }}
|
||||
# The host of external endpoint
|
||||
host: {{ log.external_endpoint.host }}
|
||||
# Port of external endpoint
|
||||
port: {{ log.external_endpoint.port }}
|
||||
{% else %}
|
||||
# Uncomment following lines to enable external syslog endpoint.
|
||||
# external_endpoint:
|
||||
# # protocol used to transmit logs to the external endpoint, options are tcp or udp
|
||||
# protocol: tcp
|
||||
# # The host of external endpoint
|
||||
# host: localhost
|
||||
# # Port of external endpoint
|
||||
# port: 5140
|
||||
{% endif %}
|
||||
{% else %}
|
||||
level: info
|
||||
# configs for logs in local storage
|
||||
local:
|
||||
# Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
|
||||
rotate_count: 50
|
||||
# Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
|
||||
# If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
|
||||
# are all valid.
|
||||
rotate_size: 200M
|
||||
# The directory on your host that stores the logs
|
||||
location: /var/log/harbor
|
||||
|
||||
# Uncomment following lines to enable external syslog endpoint.
|
||||
# external_endpoint:
|
||||
# # protocol used to transmit logs to the external endpoint, options are tcp or udp
|
||||
# protocol: tcp
|
||||
# # The host of external endpoint
|
||||
# host: localhost
|
||||
# # Port of external endpoint
|
||||
# port: 5140
|
||||
{% endif %}
|
||||
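rotate_size accepts either a plain byte count or a k/M/G suffix, as the comments above describe. A small sketch of that syntax; parseRotateSize is a hypothetical helper (assuming 1024-based units) for illustration, not the logrotate code Harbor ships:

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    // parseRotateSize converts "100" -> 100 bytes, "100k" -> kilobytes,
    // "100M" -> megabytes, "100G" -> gigabytes, matching the documented syntax.
    func parseRotateSize(s string) (int64, error) {
        mult := int64(1)
        switch {
        case strings.HasSuffix(s, "k"):
            mult, s = 1024, strings.TrimSuffix(s, "k")
        case strings.HasSuffix(s, "M"):
            mult, s = 1024*1024, strings.TrimSuffix(s, "M")
        case strings.HasSuffix(s, "G"):
            mult, s = 1024*1024*1024, strings.TrimSuffix(s, "G")
        }
        n, err := strconv.ParseInt(strings.TrimSpace(s), 10, 64)
        if err != nil {
            return 0, err
        }
        return n * mult, nil
    }

    func main() {
        for _, v := range []string{"100", "100k", "200M", "1G"} {
            n, _ := parseRotateSize(v)
            fmt.Printf("%s = %d bytes\n", v, n)
        }
    }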
|
||||
|
||||
#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
|
||||
_version: 2.11.0
|
||||
{% if external_database is defined %}
|
||||
# Uncomment external_database if using external database.
|
||||
external_database:
|
||||
harbor:
|
||||
host: {{ external_database.harbor.host }}
|
||||
port: {{ external_database.harbor.port }}
|
||||
db_name: {{ external_database.harbor.db_name }}
|
||||
username: {{ external_database.harbor.username }}
|
||||
password: {{ external_database.harbor.password }}
|
||||
ssl_mode: {{ external_database.harbor.ssl_mode }}
|
||||
max_idle_conns: {{ external_database.harbor.max_idle_conns}}
|
||||
max_open_conns: {{ external_database.harbor.max_open_conns}}
|
||||
{% else %}
|
||||
# Uncomment external_database if using external database.
|
||||
# external_database:
|
||||
# harbor:
|
||||
# host: harbor_db_host
|
||||
# port: harbor_db_port
|
||||
# db_name: harbor_db_name
|
||||
# username: harbor_db_username
|
||||
# password: harbor_db_password
|
||||
# ssl_mode: disable
|
||||
# max_idle_conns: 2
|
||||
# max_open_conns: 0
|
||||
{% endif %}
|
||||
|
||||
{% if redis is defined %}
|
||||
redis:
|
||||
# # db_index 0 is for core, it's unchangeable
|
||||
{% if redis.registry_db_index is defined %}
|
||||
registry_db_index: {{ redis.registry_db_index }}
|
||||
{% else %}
|
||||
# # registry_db_index: 1
|
||||
{% endif %}
|
||||
{% if redis.jobservice_db_index is defined %}
|
||||
jobservice_db_index: {{ redis.jobservice_db_index }}
|
||||
{% else %}
|
||||
# # jobservice_db_index: 2
|
||||
{% endif %}
|
||||
{% if redis.trivy_db_index is defined %}
|
||||
trivy_db_index: {{ redis.trivy_db_index }}
|
||||
{% else %}
|
||||
# # trivy_db_index: 5
|
||||
{% endif %}
|
||||
{% if redis.harbor_db_index is defined %}
|
||||
harbor_db_index: {{ redis.harbor_db_index }}
|
||||
{% else %}
|
||||
# # optional, the db index for harbor business misc; the default is 0, uncomment it if you want to change it.
|
||||
# # harbor_db_index: 6
|
||||
{% endif %}
|
||||
{% if redis.cache_layer_db_index is defined %}
|
||||
cache_layer_db_index: {{ redis.cache_layer_db_index }}
|
||||
{% else %}
|
||||
# # optional, the db index for the harbor cache layer; the default is 0, uncomment it if you want to change it.
|
||||
# # cache_layer_db_index: 7
|
||||
{% endif %}
|
||||
{% else %}
|
||||
# Uncomment redis if you need to customize the redis db indexes
|
||||
# redis:
|
||||
# # db_index 0 is for core, it's unchangeable
|
||||
# # registry_db_index: 1
|
||||
# # jobservice_db_index: 2
|
||||
# # trivy_db_index: 5
|
||||
# # optional, the db index for harbor business misc; the default is 0, uncomment it if you want to change it.
|
||||
# # harbor_db_index: 6
|
||||
# # optional, the db index for the harbor cache layer; the default is 0, uncomment it if you want to change it.
|
||||
# # cache_layer_db_index: 7
|
||||
{% endif %}
|
||||
|
||||
{% if external_redis is defined %}
|
||||
external_redis:
|
||||
# support redis, redis+sentinel
|
||||
# host for redis: <host_redis>:<port_redis>
|
||||
# host for redis+sentinel:
|
||||
# <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
|
||||
host: {{ external_redis.host }}
|
||||
password: {{ external_redis.password }}
|
||||
# Redis AUTH command was extended in Redis 6, it is possible to use it in the two-arguments AUTH <username> <password> form.
|
||||
{% if external_redis.username is defined %}
|
||||
username: {{ external_redis.username }}
|
||||
{% else %}
|
||||
# username:
|
||||
{% endif %}
|
||||
# sentinel_master_set must be set to support redis+sentinel
|
||||
#sentinel_master_set:
|
||||
# db_index 0 is for core, it's unchangeable
|
||||
registry_db_index: {{ external_redis.registry_db_index }}
|
||||
jobservice_db_index: {{ external_redis.jobservice_db_index }}
|
||||
trivy_db_index: 5
|
||||
idle_timeout_seconds: 30
|
||||
{% if external_redis.harbor_db_index is defined %}
|
||||
harbor_db_index: {{ external_redis.harbor_db_index }}
|
||||
{% else %}
|
||||
# # optional, the db index for harbor business misc; the default is 0, uncomment it if you want to change it.
|
||||
# # harbor_db_index: 6
|
||||
{% endif %}
|
||||
{% if external_redis.cache_layer_db_index is defined %}
|
||||
cache_layer_db_index: {{ external_redis.cache_layer_db_index }}
|
||||
{% else %}
|
||||
# # optional, the db index for the harbor cache layer; the default is 0, uncomment it if you want to change it.
|
||||
# # cache_layer_db_index: 7
|
||||
{% endif %}
|
||||
{% else %}
|
||||
# Uncomment external_redis if using an external Redis server
|
||||
# external_redis:
|
||||
# # support redis, redis+sentinel
|
||||
# # host for redis: <host_redis>:<port_redis>
|
||||
# # host for redis+sentinel:
|
||||
# # <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
|
||||
# host: redis:6379
|
||||
# password:
|
||||
# # Redis AUTH command was extended in Redis 6, it is possible to use it in the two-arguments AUTH <username> <password> form.
|
||||
# # username:
|
||||
# # sentinel_master_set must be set to support redis+sentinel
|
||||
# #sentinel_master_set:
|
||||
# # db_index 0 is for core, it's unchangeable
|
||||
# registry_db_index: 1
|
||||
# jobservice_db_index: 2
|
||||
# trivy_db_index: 5
|
||||
# idle_timeout_seconds: 30
|
||||
# # optional, the db index for harbor business misc; the default is 0, uncomment it if you want to change it.
|
||||
# # harbor_db_index: 6
|
||||
# # optional, the db index for the harbor cache layer; the default is 0, uncomment it if you want to change it.
|
||||
# # cache_layer_db_index: 7
|
||||
{% endif %}
|
||||
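The `host` field takes a single `<host_redis>:<port_redis>` for plain Redis, or a comma-separated list of sentinel addresses (with sentinel_master_set also set) for redis+sentinel. A minimal sketch of telling the two forms apart; the addresses are made up for illustration, this is not Harbor's parsing code:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        // Plain redis: a single address. Redis sentinel: comma-separated addresses.
        for _, host := range []string{
            "redis:6379",
            "sentinel1:26379,sentinel2:26379,sentinel3:26379",
        } {
            addrs := strings.Split(host, ",")
            if len(addrs) > 1 {
                fmt.Println("sentinel mode, addresses:", addrs)
            } else {
                fmt.Println("single redis address:", addrs[0])
            }
        }
    }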
|
||||
{% if uaa is defined %}
|
||||
# Uncomment uaa to trust the certificate of a uaa instance that is hosted with a self-signed cert.
|
||||
uaa:
|
||||
ca_file: {{ uaa.ca_file }}
|
||||
{% else %}
|
||||
# Uncomment uaa to trust the certificate of a uaa instance that is hosted with a self-signed cert.
|
||||
# uaa:
|
||||
# ca_file: /path/to/ca
|
||||
{% endif %}
|
||||
|
||||
|
||||
# Global proxy
|
||||
# Config http proxy for components, e.g. http://my.proxy.com:3128
|
||||
# Components don't need to connect to each other via the http proxy.
|
||||
# Remove a component from the `components` array if you want to disable the proxy
|
||||
# for it. If you want to use the proxy for replication, you MUST enable the proxy
|
||||
# for core and jobservice, and set `http_proxy` and `https_proxy`.
|
||||
# Add a domain to the `no_proxy` field when you want to disable the proxy
|
||||
# for a particular registry.
|
||||
{% if proxy is defined %}
|
||||
proxy:
|
||||
http_proxy: {{ proxy.http_proxy or ''}}
|
||||
https_proxy: {{ proxy.https_proxy or ''}}
|
||||
no_proxy: {{ proxy.no_proxy or ''}}
|
||||
{% if proxy.components is defined %}
|
||||
components:
|
||||
{% for component in proxy.components %}
|
||||
{% if component != 'clair' %}
|
||||
- {{component}}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% else %}
|
||||
proxy:
|
||||
http_proxy:
|
||||
https_proxy:
|
||||
no_proxy:
|
||||
components:
|
||||
- core
|
||||
- jobservice
|
||||
- trivy
|
||||
{% endif %}
|
||||
|
||||
{% if metric is defined %}
|
||||
metric:
|
||||
enabled: {{ metric.enabled }}
|
||||
port: {{ metric.port }}
|
||||
path: {{ metric.path }}
|
||||
{% else %}
|
||||
# metric:
|
||||
# enabled: false
|
||||
# port: 9090
|
||||
# path: /metrics
|
||||
{% endif %}
|
||||
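When metric.enabled is true, Harbor exposes Prometheus metrics on the configured port and path (9090 and /metrics in the commented example above). A minimal sketch of scraping that endpoint; the hostname is a placeholder:

    package main

    import (
        "fmt"
        "io"
        "net/http"
    )

    func main() {
        // Assumes metric.enabled: true, metric.port: 9090, metric.path: /metrics.
        resp, err := http.Get("http://harbor.example.test:9090/metrics")
        if err != nil {
            fmt.Println("scrape failed:", err)
            return
        }
        defer resp.Body.Close()
        body, _ := io.ReadAll(resp.Body)
        fmt.Printf("got %d bytes of Prometheus exposition text\n", len(body))
    }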
|
||||
# Trace related config
|
||||
# only one trace provider (jaeger or otel) can be enabled at the same time,
|
||||
# and when using jaeger as the provider, it can only be enabled in agent mode or collector mode.
|
||||
# if using jaeger collector mode, uncomment endpoint and uncomment username, password if needed
|
||||
# if using jaeger agent mode, uncomment agent_host and agent_port
|
||||
{% if trace is defined %}
|
||||
trace:
|
||||
enabled: {{ trace.enabled | lower}}
|
||||
sample_rate: {{ trace.sample_rate }}
|
||||
# # namespace used to differentiate different harbor services
|
||||
{% if trace.namespace is defined %}
|
||||
namespace: {{ trace.namespace }}
|
||||
{% else %}
|
||||
# namespace:
|
||||
{% endif %}
|
||||
# # attributes is a key value dict contains user defined attributes used to initialize trace provider
|
||||
{% if trace.attributes is defined%}
|
||||
attributes:
|
||||
{% for name, value in trace.attributes.items() %}
|
||||
{{name}}: {{value}}
|
||||
{% endfor %}
|
||||
{% else %}
|
||||
# attributes:
|
||||
# application: harbor
|
||||
{% endif %}
|
||||
{% if trace.jaeger is defined%}
|
||||
jaeger:
|
||||
endpoint: {{trace.jaeger.endpoint or '' }}
|
||||
username: {{trace.jaeger.username or ''}}
|
||||
password: {{trace.jaeger.password or ''}}
|
||||
agent_host: {{trace.jaeger.agent_host or ''}}
|
||||
agent_port: {{trace.jaeger.agent_port or ''}}
|
||||
{% else %}
|
||||
# jaeger:
|
||||
# endpoint:
|
||||
# username:
|
||||
# password:
|
||||
# agent_host:
|
||||
# agent_port:
|
||||
{% endif %}
|
||||
{% if trace.otel is defined %}
|
||||
otel:
|
||||
endpoint: {{trace.otel.endpoint or '' }}
|
||||
url_path: {{trace.otel.url_path or '' }}
|
||||
compression: {{trace.otel.compression | lower }}
|
||||
insecure: {{trace.otel.insecure | lower }}
|
||||
timeout: {{trace.otel.timeout or '' }}
|
||||
{% else %}
|
||||
# otel:
|
||||
# endpoint: hostname:4318
|
||||
# url_path: /v1/traces
|
||||
# compression: false
|
||||
# insecure: true
|
||||
# # timeout is in seconds
|
||||
# timeout: 10
|
||||
{% endif%}
|
||||
{% else %}
|
||||
# trace:
|
||||
# enabled: true
|
||||
# # set sample_rate to 1 if you want to sample 100% of trace data; set it to 0.5 to sample 50% of trace data, and so forth
|
||||
# sample_rate: 1
|
||||
# # # namespace used to differentiate different harbor services
|
||||
# # namespace:
|
||||
# # # attributes is a key value dict contains user defined attributes used to initialize trace provider
|
||||
# # attributes:
|
||||
# # application: harbor
|
||||
# # jaeger:
|
||||
# # endpoint: http://hostname:14268/api/traces
|
||||
# # username:
|
||||
# # password:
|
||||
# # agent_host: hostname
|
||||
# # agent_port: 6831
|
||||
# # otel:
|
||||
# # endpoint: hostname:4318
|
||||
# # url_path: /v1/traces
|
||||
# # compression: false
|
||||
# # insecure: true
|
||||
# # # timeout is in seconds
|
||||
# # timeout: 10
|
||||
{% endif %}
|
||||
|
||||
# enable purging of _upload directories
|
||||
{% if upload_purging is defined %}
|
||||
upload_purging:
|
||||
enabled: {{ upload_purging.enabled | lower}}
|
||||
age: {{ upload_purging.age }}
|
||||
interval: {{ upload_purging.interval }}
|
||||
dryrun: {{ upload_purging.dryrun | lower}}
|
||||
{% else %}
|
||||
upload_purging:
|
||||
enabled: true
|
||||
# remove files in _upload directories which have existed for a period of time; the default is one week.
|
||||
age: 168h
|
||||
# the interval of the purge operations
|
||||
interval: 24h
|
||||
dryrun: false
|
||||
{% endif %}
|
||||
|
||||
# Cache layer related config
|
||||
{% if cache is defined %}
|
||||
cache:
|
||||
enabled: {{ cache.enabled | lower}}
|
||||
expire_hours: {{ cache.expire_hours }}
|
||||
{% else %}
|
||||
cache:
|
||||
enabled: false
|
||||
expire_hours: 24
|
||||
{% endif %}
|
||||
|
||||
# Harbor core configurations
|
||||
# Uncomment to enable the following harbor core related configuration items.
|
||||
{% if core is defined %}
|
||||
core:
|
||||
# The provider for updating project quota (usage); there are 2 options, redis or db.
|
||||
# By default it is implemented by db, but you can switch the update to redis, which
|
||||
# can improve the performance of highly concurrent pushes to the same project
|
||||
# and reduce database connection spikes and occupancy.
|
||||
# Using redis introduces some delay before the quota usage is updated for display, so only
|
||||
# switch the provider to redis if you run into database connection spikes around
|
||||
# the scenario of highly concurrent pushes to the same project; there is no improvement for other scenarios.
|
||||
quota_update_provider: {{ core.quota_update_provider }}
|
||||
{% else %}
|
||||
# core:
|
||||
# # The provider for updating project quota (usage); there are 2 options, redis or db.
|
||||
# # By default it is implemented by db, but you can switch the update to redis, which
|
||||
# # can improve the performance of highly concurrent pushes to the same project
|
||||
# # and reduce database connection spikes and occupancy.
|
||||
# # Using redis introduces some delay before the quota usage is updated for display, so only
|
||||
# # switch the provider to redis if you run into database connection spikes around
|
||||
# # the scenario of highly concurrent pushes to the same project; there is no improvement for other scenarios.
|
||||
# quota_update_provider: redis # Or db
|
||||
{% endif %}
|
|
@ -50,7 +50,12 @@ http {
|
|||
include /etc/nginx/conf.d/*.server.conf;
|
||||
|
||||
server {
|
||||
{% if ip_family.ipv4.enabled %}
|
||||
listen 8443 ssl;
|
||||
{% endif %}
|
||||
{% if ip_family.ipv6.enabled %}
|
||||
listen [::]:8443 ssl;
|
||||
{% endif %}
|
||||
# server_name harbordomain.com;
|
||||
server_tokens off;
|
||||
# SSL
|
||||
|
@ -59,7 +64,7 @@ http {
|
|||
|
||||
# Recommendations from https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html
|
||||
ssl_protocols TLSv1.2 TLSv1.3;
|
||||
{% if internal_tls.strong_ssl_ciphers %}
|
||||
{% if strong_ssl_ciphers %}
|
||||
ssl_ciphers ECDHE+AESGCM:DHE+AESGCM:ECDHE+RSA+SHA256:DHE+RSA+SHA256:!AES128;
|
||||
{% else %}
|
||||
ssl_ciphers '!aNULL:kECDH+AESGCM:ECDH+AESGCM:RSA+AESGCM:kECDH+AES:ECDH+AES:RSA+AES:';
|
||||
|
|
|
@ -16,13 +16,19 @@ http {
|
|||
|
||||
server {
|
||||
{% if internal_tls.enabled %}
|
||||
#ip_family
|
||||
{% if ip_family.ipv4.enabled %}
|
||||
listen 8443 ssl;
|
||||
{% endif %}
|
||||
{% if ip_family.ipv6.enabled %}
|
||||
listen [::]:8443 ssl;
|
||||
{% endif %}
|
||||
# SSL
|
||||
ssl_certificate /etc/harbor/tls/portal.crt;
|
||||
ssl_certificate_key /etc/harbor/tls/portal.key;
|
||||
|
||||
ssl_protocols TLSv1.2 TLSv1.3;
|
||||
{% if internal_tls.strong_ssl_ciphers %}
|
||||
{% if strong_ssl_ciphers %}
|
||||
ssl_ciphers ECDHE+AESGCM:DHE+AESGCM:ECDHE+RSA+SHA256:DHE+RSA+SHA256:!AES128;
|
||||
{% else %}
|
||||
ssl_ciphers '!aNULL:kECDH+AESGCM:ECDH+AESGCM:RSA+AESGCM:kECDH+AES:ECDH+AES:RSA+AES:';
|
||||
|
|
|
@ -10,6 +10,7 @@ SCANNER_TRIVY_VULN_TYPE=os,library
|
|||
SCANNER_TRIVY_SEVERITY=UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL
|
||||
SCANNER_TRIVY_IGNORE_UNFIXED={{trivy_ignore_unfixed}}
|
||||
SCANNER_TRIVY_SKIP_UPDATE={{trivy_skip_update}}
|
||||
SCANNER_TRIVY_SKIP_JAVA_DB_UPDATE={{trivy_skip_java_db_update}}
|
||||
SCANNER_TRIVY_OFFLINE_SCAN={{trivy_offline_scan}}
|
||||
SCANNER_TRIVY_SECURITY_CHECKS={{trivy_security_check}}
|
||||
SCANNER_TRIVY_GITHUB_TOKEN={{trivy_github_token}}
|
||||
|
|
|
@ -212,6 +212,7 @@ def parse_yaml_config(config_file_path, with_trivy):
|
|||
trivy_configs = configs.get("trivy") or {}
|
||||
config_dict['trivy_github_token'] = trivy_configs.get("github_token") or ''
|
||||
config_dict['trivy_skip_update'] = trivy_configs.get("skip_update") or False
|
||||
config_dict['trivy_skip_java_db_update'] = trivy_configs.get("skip_java_db_update") or False
|
||||
config_dict['trivy_offline_scan'] = trivy_configs.get("offline_scan") or False
|
||||
config_dict['trivy_security_check'] = trivy_configs.get("security_check") or 'vuln'
|
||||
config_dict['trivy_ignore_unfixed'] = trivy_configs.get("ignore_unfixed") or False
|
||||
|
@ -298,6 +299,20 @@ def parse_yaml_config(config_file_path, with_trivy):
|
|||
external_database=config_dict['external_database'])
|
||||
else:
|
||||
config_dict['internal_tls'] = InternalTLS()
|
||||
# the configure item apply to internal and external tls communication
|
||||
# for compatibility, user could configure the strong_ssl_ciphers either in https section or under internal_tls section,
|
||||
# but it is more reasonable to configure it in https_config
|
||||
if https_config:
|
||||
config_dict['strong_ssl_ciphers'] = https_config.get('strong_ssl_ciphers')
|
||||
else:
|
||||
config_dict['strong_ssl_ciphers'] = False
|
||||
|
||||
if internal_tls_config:
|
||||
config_dict['strong_ssl_ciphers'] = config_dict['strong_ssl_ciphers'] or internal_tls_config.get('strong_ssl_ciphers')
|
||||
|
||||
|
||||
# ip_family config
|
||||
config_dict['ip_family'] = configs.get('ip_family') or {'ipv4': {'enabled': True}, 'ipv6': {'enabled': False}}
|
||||
|
||||
# metric configs
|
||||
metric_config = configs.get('metric')
|
||||
|
|
|
@ -27,6 +27,12 @@ def read_conf(path):
|
|||
with open(path) as f:
|
||||
try:
|
||||
d = yaml.safe_load(f)
|
||||
# the strong_ssl_ciphers configure item apply to internal and external tls communication
|
||||
# for compatibility, user could configure the strong_ssl_ciphers either in https section or under internal_tls section,
|
||||
# but it will move to https section after migration
|
||||
https_config = d.get("https") or {}
|
||||
internal_tls = d.get('internal_tls') or {}
|
||||
d['strong_ssl_ciphers'] = https_config.get('strong_ssl_ciphers') or internal_tls.get('strong_ssl_ciphers')
|
||||
except Exception as e:
|
||||
click.echo("parse config file err, make sure your harbor config version is above 1.8.0", e)
|
||||
exit(-1)
|
||||
|
|
|
@ -63,7 +63,9 @@ def render_nginx_template(config_dict):
|
|||
ssl_cert=SSL_CERT_PATH,
|
||||
ssl_cert_key=SSL_CERT_KEY_PATH,
|
||||
internal_tls=config_dict['internal_tls'],
|
||||
metric=config_dict['metric'])
|
||||
metric=config_dict['metric'],
|
||||
strong_ssl_ciphers=config_dict['strong_ssl_ciphers'],
|
||||
ip_family=config_dict['ip_family'])
|
||||
location_file_pattern = CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTPS
|
||||
|
||||
else:
|
||||
|
|
|
@ -14,5 +14,8 @@ def prepare_portal(config_dict):
|
|||
str(portal_conf_template_path),
|
||||
portal_conf,
|
||||
internal_tls=config_dict['internal_tls'],
|
||||
ip_family=config_dict['ip_family'],
|
||||
uid=DEFAULT_UID,
|
||||
gid=DEFAULT_GID)
|
||||
gid=DEFAULT_GID,
|
||||
strong_ssl_ciphers=config_dict['strong_ssl_ciphers']
|
||||
)
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
FROM golang:1.21.4
|
||||
FROM golang:1.22.2
|
||||
|
||||
ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution
|
||||
ENV BUILDTAGS include_oss include_gcs
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
FROM golang:1.21.4
|
||||
FROM golang:1.22.2
|
||||
|
||||
ADD . /go/src/github.com/aquasecurity/harbor-scanner-trivy/
|
||||
WORKDIR /go/src/github.com/aquasecurity/harbor-scanner-trivy/
|
||||
|
|
|
@ -19,7 +19,7 @@ TEMP=$(mktemp -d ${TMPDIR-/tmp}/trivy-adapter.XXXXXX)
|
|||
git clone https://github.com/aquasecurity/harbor-scanner-trivy.git $TEMP
|
||||
cd $TEMP; git checkout $VERSION; cd -
|
||||
|
||||
echo "Building Trivy adapter binary based on golang:1.21.4..."
|
||||
echo "Building Trivy adapter binary based on golang:1.22.2..."
|
||||
cp Dockerfile.binary $TEMP
|
||||
docker build -f $TEMP/Dockerfile.binary -t trivy-adapter-golang $TEMP
|
||||
|
||||
|
|
|
@ -96,7 +96,7 @@ func main() {
|
|||
)
|
||||
prometheus.MustRegister(harborExporter)
|
||||
if err := harborExporter.ListenAndServe(); err != nil {
|
||||
log.Errorf("Error starting Harbor expoter %s", err)
|
||||
log.Errorf("Error starting Harbor exporter %s", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -48,20 +48,23 @@ func main() {
|
|||
log.Fatalf("Failed to connect to Database, error: %v\n", err)
|
||||
}
|
||||
defer db.Close()
|
||||
c := make(chan struct{}, 1)
|
||||
|
||||
c := make(chan struct{})
|
||||
go func() {
|
||||
defer close(c)
|
||||
|
||||
err := db.Ping()
|
||||
for ; err != nil; err = db.Ping() {
|
||||
log.Println("Failed to Ping DB, sleep for 1 second.")
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
c <- struct{}{}
|
||||
}()
|
||||
select {
|
||||
case <-c:
|
||||
case <-time.After(30 * time.Second):
|
||||
log.Fatal("Failed to connect DB after 30 seconds, time out. \n")
|
||||
}
|
||||
|
||||
row := db.QueryRow(pgSQLCheckColStmt)
|
||||
var tblCount, colCount int
|
||||
if err := row.Scan(&tblCount, &colCount); err != nil {
|
||||
|
|
|
@ -14,6 +14,8 @@
|
|||
|
||||
package common
|
||||
|
||||
import "time"
|
||||
|
||||
type contextKey string
|
||||
|
||||
// const variables
|
||||
|
@ -241,4 +243,7 @@ const (
|
|||
BeegoMaxUploadSizeBytes = "beego_max_upload_size_bytes"
|
||||
// DefaultBeegoMaxUploadSizeBytes sets default max upload size to 128GB
|
||||
DefaultBeegoMaxUploadSizeBytes = 1 << 37
|
||||
|
||||
// Global Leeway used for token validation
|
||||
JwtLeeway = 60 * time.Second
|
||||
)
|
||||
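The new JwtLeeway constant gives token validation a 60-second grace window. A hedged sketch of how such a leeway is commonly applied to an `exp` claim; this is a generic illustration under that assumption, not Harbor's validator:

    package main

    import (
        "fmt"
        "time"
    )

    const JwtLeeway = 60 * time.Second

    // rejected reports whether a token with the given expiry should be refused,
    // tolerating up to JwtLeeway of clock skew between issuer and verifier.
    func rejected(expiry, now time.Time) bool {
        return now.After(expiry.Add(JwtLeeway))
    }

    func main() {
        exp := time.Now().Add(-30 * time.Second) // nominally expired 30s ago
        fmt.Println("rejected:", rejected(exp, time.Now())) // false: still inside the leeway window
    }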
|
|
|
@ -51,6 +51,7 @@ const (
|
|||
ResourceRobot = Resource("robot")
|
||||
ResourceNotificationPolicy = Resource("notification-policy")
|
||||
ResourceScan = Resource("scan")
|
||||
ResourceSBOM = Resource("sbom")
|
||||
ResourceScanner = Resource("scanner")
|
||||
ResourceArtifact = Resource("artifact")
|
||||
ResourceTag = Resource("tag")
|
||||
|
@ -149,6 +150,9 @@ var (
|
|||
{Resource: ResourceSecurityHub, Action: ActionList},
|
||||
|
||||
{Resource: ResourceCatalog, Action: ActionRead},
|
||||
|
||||
{Resource: ResourceQuota, Action: ActionRead},
|
||||
{Resource: ResourceQuota, Action: ActionList},
|
||||
},
|
||||
"Project": {
|
||||
{Resource: ResourceLog, Action: ActionList},
|
||||
|
@ -179,6 +183,10 @@ var (
|
|||
{Resource: ResourceScan, Action: ActionRead},
|
||||
{Resource: ResourceScan, Action: ActionStop},
|
||||
|
||||
{Resource: ResourceSBOM, Action: ActionCreate},
|
||||
{Resource: ResourceSBOM, Action: ActionStop},
|
||||
{Resource: ResourceSBOM, Action: ActionRead},
|
||||
|
||||
{Resource: ResourceTag, Action: ActionCreate},
|
||||
{Resource: ResourceTag, Action: ActionList},
|
||||
{Resource: ResourceTag, Action: ActionDelete},
|
||||
|
@ -221,6 +229,8 @@ var (
|
|||
{Resource: ResourceLabel, Action: ActionDelete},
|
||||
{Resource: ResourceLabel, Action: ActionList},
|
||||
{Resource: ResourceLabel, Action: ActionUpdate},
|
||||
|
||||
{Resource: ResourceQuota, Action: ActionRead},
|
||||
},
|
||||
}
|
||||
)
|
||||
|
|
|
@ -86,6 +86,9 @@ var (
|
|||
{Resource: rbac.ResourceScan, Action: rbac.ActionCreate},
|
||||
{Resource: rbac.ResourceScan, Action: rbac.ActionRead},
|
||||
{Resource: rbac.ResourceScan, Action: rbac.ActionStop},
|
||||
{Resource: rbac.ResourceSBOM, Action: rbac.ActionCreate},
|
||||
{Resource: rbac.ResourceSBOM, Action: rbac.ActionStop},
|
||||
{Resource: rbac.ResourceSBOM, Action: rbac.ActionRead},
|
||||
|
||||
{Resource: rbac.ResourceScanner, Action: rbac.ActionRead},
|
||||
{Resource: rbac.ResourceScanner, Action: rbac.ActionCreate},
|
||||
|
@ -169,6 +172,9 @@ var (
|
|||
{Resource: rbac.ResourceScan, Action: rbac.ActionCreate},
|
||||
{Resource: rbac.ResourceScan, Action: rbac.ActionRead},
|
||||
{Resource: rbac.ResourceScan, Action: rbac.ActionStop},
|
||||
{Resource: rbac.ResourceSBOM, Action: rbac.ActionCreate},
|
||||
{Resource: rbac.ResourceSBOM, Action: rbac.ActionStop},
|
||||
{Resource: rbac.ResourceSBOM, Action: rbac.ActionRead},
|
||||
|
||||
{Resource: rbac.ResourceScanner, Action: rbac.ActionRead},
|
||||
|
||||
|
@ -223,6 +229,7 @@ var (
|
|||
{Resource: rbac.ResourceRobot, Action: rbac.ActionList},
|
||||
|
||||
{Resource: rbac.ResourceScan, Action: rbac.ActionRead},
|
||||
{Resource: rbac.ResourceSBOM, Action: rbac.ActionRead},
|
||||
|
||||
{Resource: rbac.ResourceScanner, Action: rbac.ActionRead},
|
||||
|
||||
|
@ -267,6 +274,7 @@ var (
|
|||
{Resource: rbac.ResourceRobot, Action: rbac.ActionList},
|
||||
|
||||
{Resource: rbac.ResourceScan, Action: rbac.ActionRead},
|
||||
{Resource: rbac.ResourceSBOM, Action: rbac.ActionRead},
|
||||
|
||||
{Resource: rbac.ResourceScanner, Action: rbac.ActionRead},
|
||||
|
||||
|
@ -284,11 +292,13 @@ var (
|
|||
{Resource: rbac.ResourceQuota, Action: rbac.ActionRead},
|
||||
|
||||
{Resource: rbac.ResourceRepository, Action: rbac.ActionList},
|
||||
{Resource: rbac.ResourceRepository, Action: rbac.ActionRead},
|
||||
{Resource: rbac.ResourceRepository, Action: rbac.ActionPull},
|
||||
|
||||
{Resource: rbac.ResourceConfiguration, Action: rbac.ActionRead},
|
||||
|
||||
{Resource: rbac.ResourceScan, Action: rbac.ActionRead},
|
||||
{Resource: rbac.ResourceSBOM, Action: rbac.ActionRead},
|
||||
|
||||
{Resource: rbac.ResourceScanner, Action: rbac.ActionRead},
|
||||
|
||||
|
|
|
@ -77,7 +77,7 @@ func (s *SecurityContext) IsSolutionUser() bool {
|
|||
// Can returns whether the user can do action on resource
|
||||
// returns true if the corresponding user of the secret
|
||||
// is jobservice or core service, otherwise returns false
|
||||
func (s *SecurityContext) Can(ctx context.Context, action types.Action, resource types.Resource) bool {
|
||||
func (s *SecurityContext) Can(_ context.Context, _ types.Action, _ types.Resource) bool {
|
||||
if s.store == nil {
|
||||
return false
|
||||
}
|
||||
|
|
|
@ -63,7 +63,7 @@ func (t *tokenSecurityCtx) GetMyProjects() ([]*models.Project, error) {
|
|||
return []*models.Project{}, nil
|
||||
}
|
||||
|
||||
func (t *tokenSecurityCtx) GetProjectRoles(projectIDOrName interface{}) []int {
|
||||
func (t *tokenSecurityCtx) GetProjectRoles(_ interface{}) []int {
|
||||
return []int{}
|
||||
}
|
||||
|
||||
|
|
|
@ -30,7 +30,7 @@ type GCResult struct {
|
|||
}
|
||||
|
||||
// NewRegistryCtl returns a mock registry server
|
||||
func NewRegistryCtl(config map[string]interface{}) (*httptest.Server, error) {
|
||||
func NewRegistryCtl(_ map[string]interface{}) (*httptest.Server, error) {
|
||||
m := []*RequestHandlerMapping{}
|
||||
|
||||
gcr := GCResult{true, "hello-world", time.Now(), time.Now()}
|
||||
|
|
|
@ -49,7 +49,7 @@ func (fc *FakeClient) GetUserInfo(token string) (*UserInfo, error) {
|
|||
}
|
||||
|
||||
// UpdateConfig ...
|
||||
func (fc *FakeClient) UpdateConfig(cfg *ClientConfig) error {
|
||||
func (fc *FakeClient) UpdateConfig(_ *ClientConfig) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -313,11 +313,11 @@ func ValidateCronString(cron string) error {
|
|||
// sort.Slice(input, func(i, j int) bool {
|
||||
// return MostMatchSorter(input[i].GroupName, input[j].GroupName, matchWord)
|
||||
// })
|
||||
//
|
||||
// a is the field to be used for sorting, b is the other field, matchWord is the word to be matched
|
||||
// the return value is true if a is less than b
|
||||
// for example, search with "user", input is {"harbor_user", "user", "users, "admin_user"}
|
||||
// it returns with this order {"user", "users", "admin_user", "harbor_user"}
|
||||
|
||||
func MostMatchSorter(a, b string, matchWord string) bool {
|
||||
// exact match always first
|
||||
if a == matchWord {
|
||||
|
@ -332,3 +332,8 @@ func MostMatchSorter(a, b string, matchWord string) bool {
|
|||
}
|
||||
return len(a) < len(b)
|
||||
}
|
||||
|
||||
// IsLocalPath checks if path is local, includes the empty path
|
||||
func IsLocalPath(path string) bool {
|
||||
return len(path) == 0 || (strings.HasPrefix(path, "/") && !strings.HasPrefix(path, "//"))
|
||||
}
|
||||
|
|
|
@ -486,3 +486,26 @@ func TestValidateCronString(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsLocalPath(t *testing.T) {
|
||||
type args struct {
|
||||
path string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want bool
|
||||
}{
|
||||
{"normal test", args{"/harbor/project"}, true},
|
||||
{"failed", args{"www.myexample.com"}, false},
|
||||
{"other_site1", args{"//www.myexample.com"}, false},
|
||||
{"other_site2", args{"https://www.myexample.com"}, false},
|
||||
{"other_site", args{"http://www.myexample.com"}, false},
|
||||
{"empty_path", args{""}, true},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equalf(t, tt.want, IsLocalPath(tt.args.path), "IsLocalPath(%v)", tt.args.path)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -127,10 +127,18 @@ func (a *abstractor) abstractManifestV2Metadata(artifact *artifact.Artifact, con
|
|||
}
|
||||
// use the "manifest.config.mediatype" as the media type of the artifact
|
||||
artifact.MediaType = manifest.Config.MediaType
|
||||
|
||||
if manifest.Annotations[wasm.AnnotationVariantKey] == wasm.AnnotationVariantValue || manifest.Annotations[wasm.AnnotationHandlerKey] == wasm.AnnotationHandlerValue {
|
||||
artifact.MediaType = wasm.MediaType
|
||||
}
|
||||
/*
|
||||
https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#listing-referrers
|
||||
For referrers list, if the artifactType is empty or missing in the image manifest, the value of artifactType MUST be set to the config descriptor mediaType value
|
||||
*/
|
||||
if manifest.ArtifactType != "" {
|
||||
artifact.ArtifactType = manifest.ArtifactType
|
||||
} else {
|
||||
artifact.ArtifactType = manifest.Config.MediaType
|
||||
}
|
||||
|
||||
// set size
|
||||
artifact.Size = int64(len(content)) + manifest.Config.Size
|
||||
|
@ -153,6 +161,16 @@ func (a *abstractor) abstractIndexMetadata(ctx context.Context, art *artifact.Ar
|
|||
return err
|
||||
}
|
||||
|
||||
/*
|
||||
https://github.com/opencontainers/distribution-spec/blob/v1.1.0/spec.md#listing-referrers
|
||||
For referrers list, If the artifactType is empty or missing in an index, the artifactType MUST be omitted.
|
||||
*/
|
||||
if index.ArtifactType != "" {
|
||||
art.ArtifactType = index.ArtifactType
|
||||
} else {
|
||||
art.ArtifactType = ""
|
||||
}
|
||||
|
||||
// set annotations
|
||||
art.Annotations = index.Annotations
|
||||
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
package artifact
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/distribution"
|
||||
|
@ -175,7 +176,66 @@ var (
|
|||
"com.example.key1": "value1"
|
||||
}
|
||||
}`
|
||||
|
||||
OCIManifest = `{
|
||||
"schemaVersion": 2,
|
||||
"mediaType": "application/vnd.oci.image.manifest.v1+json",
|
||||
"config": {
|
||||
"mediaType": "application/vnd.example.config.v1+json",
|
||||
"digest": "sha256:5891b5b522d5df086d0ff0b110fbd9d21bb4fc7163af34d08286a2e846f6be03",
|
||||
"size": 123
|
||||
},
|
||||
"layers": [
|
||||
{
|
||||
"mediaType": "application/vnd.example.data.v1.tar+gzip",
|
||||
"digest": "sha256:e258d248fda94c63753607f7c4494ee0fcbe92f1a76bfdac795c9d84101eb317",
|
||||
"size": 1234
|
||||
}
|
||||
],
|
||||
"annotations": {
|
||||
"com.example.key1": "value1"
|
||||
}
|
||||
}`
|
||||
OCIManifestWithArtifactType = `{
|
||||
"schemaVersion": 2,
|
||||
"mediaType": "application/vnd.oci.image.manifest.v1+json",
|
||||
"artifactType": "application/vnd.example+type",
|
||||
"config": {
|
||||
"mediaType": "application/vnd.example.config.v1+json",
|
||||
"digest": "sha256:5891b5b522d5df086d0ff0b110fbd9d21bb4fc7163af34d08286a2e846f6be03",
|
||||
"size": 123
|
||||
},
|
||||
"layers": [
|
||||
{
|
||||
"mediaType": "application/vnd.example.data.v1.tar+gzip",
|
||||
"digest": "sha256:e258d248fda94c63753607f7c4494ee0fcbe92f1a76bfdac795c9d84101eb317",
|
||||
"size": 1234
|
||||
}
|
||||
],
|
||||
"annotations": {
|
||||
"com.example.key1": "value1"
|
||||
}
|
||||
}`
|
||||
OCIManifestWithEmptyConfig = `{
|
||||
"schemaVersion": 2,
|
||||
"mediaType": "application/vnd.oci.image.manifest.v1+json",
|
||||
"artifactType": "application/vnd.example+type",
|
||||
"config": {
|
||||
"mediaType": "application/vnd.oci.empty.v1+json",
|
||||
"digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
|
||||
"size": 2
|
||||
},
|
||||
"layers": [
|
||||
{
|
||||
"mediaType": "application/vnd.example+type",
|
||||
"digest": "sha256:e258d248fda94c63753607f7c4494ee0fcbe92f1a76bfdac795c9d84101eb317",
|
||||
"size": 1234
|
||||
}
|
||||
],
|
||||
"annotations": {
|
||||
"oci.opencontainers.image.created": "2023-01-02T03:04:05Z",
|
||||
"com.example.data": "payload"
|
||||
}
|
||||
}`
|
||||
index = `{
|
||||
"schemaVersion": 2,
|
||||
"manifests": [
|
||||
|
@ -202,6 +262,34 @@ var (
|
|||
"com.example.key1": "value1"
|
||||
}
|
||||
}`
|
||||
indexWithArtifactType = `{
|
||||
"schemaVersion": 2,
|
||||
"mediaType": "application/vnd.oci.image.index.v1+json",
|
||||
"artifactType": "application/vnd.food.stand",
|
||||
"manifests": [
|
||||
{
|
||||
"mediaType": "application/vnd.oci.image.manifest.v1+json",
|
||||
"size": 7143,
|
||||
"digest": "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f",
|
||||
"platform": {
|
||||
"architecture": "ppc64le",
|
||||
"os": "linux"
|
||||
}
|
||||
},
|
||||
{
|
||||
"mediaType": "application/vnd.oci.image.manifest.v1+json",
|
||||
"size": 7682,
|
||||
"digest": "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270",
|
||||
"platform": {
|
||||
"architecture": "amd64",
|
||||
"os": "linux"
|
||||
}
|
||||
}
|
||||
],
|
||||
"annotations": {
|
||||
"com.example.key1": "value1"
|
||||
}
|
||||
}`
|
||||
)
|
||||
|
||||
type abstractorTestSuite struct {
|
||||
|
@ -267,6 +355,67 @@ func (a *abstractorTestSuite) TestAbstractMetadataOfV2Manifest() {
|
|||
a.Equal("value1", artifact.Annotations["com.example.key1"])
|
||||
}
|
||||
|
||||
// oci-spec v1
|
||||
func (a *abstractorTestSuite) TestAbstractMetadataOfOCIManifest() {
|
||||
manifest, _, err := distribution.UnmarshalManifest(v1.MediaTypeImageManifest, []byte(OCIManifest))
|
||||
a.Require().Nil(err)
|
||||
a.regCli.On("PullManifest", mock.Anything, mock.Anything).Return(manifest, "", nil)
|
||||
artifact := &artifact.Artifact{
|
||||
ID: 1,
|
||||
}
|
||||
a.processor.On("AbstractMetadata", mock.Anything, mock.Anything, mock.Anything).Return(nil)
|
||||
err = a.abstractor.AbstractMetadata(context.TODO(), artifact)
|
||||
a.Require().Nil(err)
|
||||
a.Assert().Equal(int64(1), artifact.ID)
|
||||
a.Assert().Equal(v1.MediaTypeImageManifest, artifact.ManifestMediaType)
|
||||
a.Assert().Equal("application/vnd.example.config.v1+json", artifact.MediaType)
|
||||
a.Assert().Equal("application/vnd.example.config.v1+json", artifact.ArtifactType)
|
||||
a.Assert().Equal(int64(1916), artifact.Size)
|
||||
a.Require().Len(artifact.Annotations, 1)
|
||||
a.Equal("value1", artifact.Annotations["com.example.key1"])
|
||||
}
|
||||
|
||||
// oci-spec v1.1.0 with artifactType
|
||||
func (a *abstractorTestSuite) TestAbstractMetadataOfOCIManifestWithArtifactType() {
|
||||
manifest, _, err := distribution.UnmarshalManifest(v1.MediaTypeImageManifest, []byte(OCIManifestWithArtifactType))
|
||||
a.Require().Nil(err)
|
||||
a.regCli.On("PullManifest", mock.Anything, mock.Anything).Return(manifest, "", nil)
|
||||
artifact := &artifact.Artifact{
|
||||
ID: 1,
|
||||
}
|
||||
a.processor.On("AbstractMetadata", mock.Anything, mock.Anything, mock.Anything).Return(nil)
|
||||
err = a.abstractor.AbstractMetadata(context.TODO(), artifact)
|
||||
a.Require().Nil(err)
|
||||
a.Assert().Equal(int64(1), artifact.ID)
|
||||
a.Assert().Equal(v1.MediaTypeImageManifest, artifact.ManifestMediaType)
|
||||
a.Assert().Equal("application/vnd.example.config.v1+json", artifact.MediaType)
|
||||
a.Assert().Equal("application/vnd.example+type", artifact.ArtifactType)
|
||||
a.Assert().Equal(int64(1966), artifact.Size)
|
||||
a.Require().Len(artifact.Annotations, 1)
|
||||
a.Equal("value1", artifact.Annotations["com.example.key1"])
|
||||
}
|
||||
|
||||
// empty config with artifactType
|
||||
func (a *abstractorTestSuite) TestAbstractMetadataOfV2ManifestWithEmptyConfig() {
|
||||
// v1.MediaTypeImageManifest
|
||||
manifest, _, err := distribution.UnmarshalManifest(v1.MediaTypeImageManifest, []byte(OCIManifestWithEmptyConfig))
|
||||
a.Require().Nil(err)
|
||||
a.regCli.On("PullManifest", mock.Anything, mock.Anything).Return(manifest, "", nil)
|
||||
artifact := &artifact.Artifact{
|
||||
ID: 1,
|
||||
}
|
||||
a.processor.On("AbstractMetadata", mock.Anything, mock.Anything, mock.Anything).Return(nil)
|
||||
err = a.abstractor.AbstractMetadata(context.TODO(), artifact)
|
||||
a.Require().Nil(err)
|
||||
a.Assert().Equal(int64(1), artifact.ID)
|
||||
a.Assert().Equal(v1.MediaTypeImageManifest, artifact.ManifestMediaType)
|
||||
a.Assert().Equal(v1.MediaTypeEmptyJSON, artifact.MediaType)
|
||||
a.Assert().Equal("application/vnd.example+type", artifact.ArtifactType)
|
||||
a.Assert().Equal(int64(1880), artifact.Size)
|
||||
a.Require().Len(artifact.Annotations, 2)
|
||||
a.Equal("payload", artifact.Annotations["com.example.data"])
|
||||
}
|
||||
|
||||
// OCI index
|
||||
func (a *abstractorTestSuite) TestAbstractMetadataOfIndex() {
|
||||
manifest, _, err := distribution.UnmarshalManifest(v1.MediaTypeImageIndex, []byte(index))
|
||||
|
@ -279,17 +428,41 @@ func (a *abstractorTestSuite) TestAbstractMetadataOfIndex() {
|
|||
artifact := &artifact.Artifact{
|
||||
ID: 1,
|
||||
}
|
||||
err = a.abstractor.AbstractMetadata(nil, artifact)
|
||||
err = a.abstractor.AbstractMetadata(context.TODO(), artifact)
|
||||
a.Require().Nil(err)
|
||||
a.Assert().Equal(int64(1), artifact.ID)
|
||||
a.Assert().Equal(v1.MediaTypeImageIndex, artifact.ManifestMediaType)
|
||||
a.Assert().Equal(v1.MediaTypeImageIndex, artifact.MediaType)
|
||||
a.Assert().Equal("", artifact.ArtifactType)
|
||||
a.Assert().Equal(int64(668), artifact.Size)
|
||||
a.Require().Len(artifact.Annotations, 1)
|
||||
a.Assert().Equal("value1", artifact.Annotations["com.example.key1"])
|
||||
a.Len(artifact.References, 2)
|
||||
}
|
||||
|
||||
func (a *abstractorTestSuite) TestAbstractMetadataOfIndexWithArtifactType() {
|
||||
manifest, _, err := distribution.UnmarshalManifest(v1.MediaTypeImageIndex, []byte(indexWithArtifactType))
|
||||
a.Require().Nil(err)
|
||||
a.regCli.On("PullManifest", mock.Anything, mock.Anything).Return(manifest, "", nil)
|
||||
a.argMgr.On("GetByDigest", mock.Anything, mock.Anything, mock.Anything).Return(&artifact.Artifact{
|
||||
ID: 2,
|
||||
Size: 10,
|
||||
}, nil)
|
||||
artifact := &artifact.Artifact{
|
||||
ID: 1,
|
||||
}
|
||||
err = a.abstractor.AbstractMetadata(context.TODO(), artifact)
|
||||
a.Require().Nil(err)
|
||||
a.Assert().Equal(int64(1), artifact.ID)
|
||||
a.Assert().Equal(v1.MediaTypeImageIndex, artifact.ManifestMediaType)
|
||||
a.Assert().Equal(v1.MediaTypeImageIndex, artifact.MediaType)
|
||||
a.Assert().Equal("application/vnd.food.stand", artifact.ArtifactType)
|
||||
a.Assert().Equal(int64(801), artifact.Size)
|
||||
a.Require().Len(artifact.Annotations, 1)
|
||||
a.Assert().Equal("value1", artifact.Annotations["com.example.key1"])
|
||||
a.Len(artifact.References, 2)
|
||||
}
|
||||
|
||||
type unknownManifest struct{}
|
||||
|
||||
func (u *unknownManifest) References() []distribution.Descriptor {
|
||||
|
|
|
@ -39,7 +39,7 @@ type v1alpha1Parser struct {
|
|||
regCli reg.Client
|
||||
}
|
||||
|
||||
func (p *v1alpha1Parser) Parse(ctx context.Context, artifact *artifact.Artifact, manifest []byte) error {
|
||||
func (p *v1alpha1Parser) Parse(_ context.Context, artifact *artifact.Artifact, manifest []byte) error {
|
||||
if artifact.ManifestMediaType != v1.MediaTypeImageManifest && artifact.ManifestMediaType != schema2.MediaTypeManifest {
|
||||
return nil
|
||||
}
|
||||
|
@ -92,6 +92,7 @@ func parseV1alpha1Icon(artifact *artifact.Artifact, manifest *v1.Manifest, reg r
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer icon.Close()
|
||||
// check the size of the size <= 1MB
|
||||
data, err := io.ReadAll(io.LimitReader(icon, 1<<20))
|
||||
if err != nil {
|
||||
|
|
|
@ -29,6 +29,7 @@ import (
|
|||
"github.com/goharbor/harbor/src/controller/artifact/processor/chart"
|
||||
"github.com/goharbor/harbor/src/controller/artifact/processor/cnab"
|
||||
"github.com/goharbor/harbor/src/controller/artifact/processor/image"
|
||||
"github.com/goharbor/harbor/src/controller/artifact/processor/sbom"
|
||||
"github.com/goharbor/harbor/src/controller/artifact/processor/wasm"
|
||||
"github.com/goharbor/harbor/src/controller/event/metadata"
|
||||
"github.com/goharbor/harbor/src/controller/tag"
|
||||
|
@ -57,7 +58,10 @@ import (
|
|||
|
||||
var (
|
||||
// Ctl is a global artifact controller instance
|
||||
Ctl = NewController()
|
||||
Ctl = NewController()
|
||||
skippedContentTypes = map[string]struct{}{
|
||||
"application/vnd.in-toto+json": {},
|
||||
}
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -73,6 +77,7 @@ var (
|
|||
chart.ArtifactTypeChart: icon.DigestOfIconChart,
|
||||
cnab.ArtifactTypeCNAB: icon.DigestOfIconCNAB,
|
||||
wasm.ArtifactTypeWASM: icon.DigestOfIconWASM,
|
||||
sbom.ArtifactTypeSBOM: icon.DigestOfIconAccSBOM,
|
||||
}
|
||||
)
|
||||
|
||||
|
@ -111,6 +116,8 @@ type Controller interface {
|
|||
RemoveLabel(ctx context.Context, artifactID int64, labelID int64) (err error)
|
||||
// Walk walks the artifact tree rooted at root, calling walkFn for each artifact in the tree, including root.
|
||||
Walk(ctx context.Context, root *Artifact, walkFn func(*Artifact) error, option *Option) error
|
||||
// HasUnscannableLayer check artifact with digest if has unscannable layer
|
||||
HasUnscannableLayer(ctx context.Context, dgst string) (bool, error)
|
||||
}
|
||||
|
||||
// NewController creates an instance of the default artifact controller
|
||||
|
@ -227,6 +234,7 @@ func (c *controller) ensureArtifact(ctx context.Context, repository, digest stri
|
|||
if !errors.IsConflictErr(err) {
|
||||
return false, nil, err
|
||||
}
|
||||
log.Debugf("failed to create artifact %s@%s: %v", repository, digest, err)
|
||||
// if got conflict error, try to get the artifact again
|
||||
artifact, err = c.artMgr.GetByDigest(ctx, repository, digest)
|
||||
if err != nil {
|
||||
|
@ -323,12 +331,6 @@ func (c *controller) deleteDeeply(ctx context.Context, id int64, isRoot, isAcces
|
|||
return err
|
||||
}
|
||||
|
||||
if isAccessory {
|
||||
if err := c.accessoryMgr.DeleteAccessories(ctx, q.New(q.KeyWords{"ArtifactID": art.ID, "Digest": art.Digest})); err != nil && !errors.IsErr(err, errors.NotFoundCode) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// the child artifact is referenced by some tags, skip
|
||||
if !isRoot && len(art.Tags) > 0 {
|
||||
return nil
|
||||
|
@ -351,11 +353,26 @@ func (c *controller) deleteDeeply(ctx context.Context, id int64, isRoot, isAcces
|
|||
return nil
|
||||
}
|
||||
|
||||
if isAccessory {
|
||||
if err := c.accessoryMgr.DeleteAccessories(ctx, q.New(q.KeyWords{"ArtifactID": art.ID, "Digest": art.Digest})); err != nil && !errors.IsErr(err, errors.NotFoundCode) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// delete accessories if contains any
|
||||
for _, acc := range art.Accessories {
|
||||
// only hard ref accessory should be removed
|
||||
if acc.IsHard() {
|
||||
if err = c.deleteDeeply(ctx, acc.GetData().ArtifactID, true, true); err != nil {
|
||||
// if this acc artifact has parent(is child), set isRoot to false
|
||||
parents, err := c.artMgr.ListReferences(ctx, &q.Query{
|
||||
Keywords: map[string]interface{}{
|
||||
"ChildID": acc.GetData().ArtifactID,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = c.deleteDeeply(ctx, acc.GetData().ArtifactID, len(parents) == 0, true); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -368,7 +385,12 @@ func (c *controller) deleteDeeply(ctx context.Context, id int64, isRoot, isAcces
|
|||
!errors.IsErr(err, errors.NotFoundCode) {
|
||||
return err
|
||||
}
|
||||
if err = c.deleteDeeply(ctx, reference.ChildID, false, false); err != nil {
|
||||
// if the child artifact is an accessory, set isAccessory to true
|
||||
accs, err := c.accessoryMgr.List(ctx, q.New(q.KeyWords{"ArtifactID": reference.ChildID}))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = c.deleteDeeply(ctx, reference.ChildID, false, len(accs) > 0); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -742,3 +764,21 @@ func (c *controller) populateAccessories(ctx context.Context, art *Artifact) {
|
|||
}
|
||||
art.Accessories = accs
|
||||
}
|
||||
|
||||
// HasUnscannableLayer check if it is a in-toto sbom, if it contains any blob with a content_type is application/vnd.in-toto+json, then consider as in-toto sbom
|
||||
func (c *controller) HasUnscannableLayer(ctx context.Context, dgst string) (bool, error) {
|
||||
if len(dgst) == 0 {
|
||||
return false, nil
|
||||
}
|
||||
blobs, err := c.blobMgr.GetByArt(ctx, dgst)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
for _, b := range blobs {
|
||||
if _, exist := skippedContentTypes[b.ContentType]; exist {
|
||||
log.Debugf("the artifact with digest %v is unscannable, because it contains content type: %v", dgst, b.ContentType)
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
@@ -35,6 +35,7 @@ import (
	accessorymodel "github.com/goharbor/harbor/src/pkg/accessory/model"
	basemodel "github.com/goharbor/harbor/src/pkg/accessory/model/base"
	"github.com/goharbor/harbor/src/pkg/artifact"
	"github.com/goharbor/harbor/src/pkg/blob/models"
	"github.com/goharbor/harbor/src/pkg/label/model"
	repomodel "github.com/goharbor/harbor/src/pkg/repository/model"
	model_tag "github.com/goharbor/harbor/src/pkg/tag/model/tag"

@@ -237,6 +238,21 @@ func (c *controllerTestSuite) TestEnsureArtifact() {
	c.Require().Nil(err)
	c.True(created)
	c.Equal(int64(1), art.ID)

	// reset the mock
	c.SetupTest()

	// the artifact doesn't exist, creating it returns a conflict error, and the follow-up get fails as well
	c.repoMgr.On("GetByName", mock.Anything, mock.Anything).Return(&repomodel.RepoRecord{
		ProjectID: 1,
	}, nil)
	c.artMgr.On("GetByDigest", mock.Anything, mock.Anything, mock.Anything).Return(nil, errors.NotFoundError(nil))
	c.artMgr.On("Create", mock.Anything, mock.Anything).Return(int64(1), errors.ConflictError(nil))
	c.abstractor.On("AbstractMetadata").Return(nil)
	created, art, err = c.ctl.ensureArtifact(orm.NewContext(nil, &ormtesting.FakeOrmer{}), "library/hello-world", digest)
	c.Require().Error(err, errors.NotFoundError(nil))
	c.False(created)
	c.Require().Nil(art)
}

func (c *controllerTestSuite) TestEnsure() {
@@ -663,6 +679,29 @@ func (c *controllerTestSuite) TestWalk() {
	}
}

func (c *controllerTestSuite) TestIsIntoto() {
	blobs := []*models.Blob{
		{Digest: "sha256:00000", ContentType: "application/vnd.oci.image.manifest.v1+json"},
		{Digest: "sha256:22222", ContentType: "application/vnd.oci.image.config.v1+json"},
		{Digest: "sha256:11111", ContentType: "application/vnd.in-toto+json"},
	}
	c.blobMgr.On("GetByArt", mock.Anything, mock.Anything).Return(blobs, nil).Once()
	isIntoto, err := c.ctl.HasUnscannableLayer(context.Background(), "sha256: 77777")
	c.Nil(err)
	c.True(isIntoto)

	blobs2 := []*models.Blob{
		{Digest: "sha256:00000", ContentType: "application/vnd.oci.image.manifest.v1+json"},
		{Digest: "sha256:22222", ContentType: "application/vnd.oci.image.config.v1+json"},
		{Digest: "sha256:11111", ContentType: "application/vnd.oci.image.layer.v1.tar+gzip"},
	}

	c.blobMgr.On("GetByArt", mock.Anything, mock.Anything).Return(blobs2, nil).Once()
	isIntoto2, err := c.ctl.HasUnscannableLayer(context.Background(), "sha256: 8888")
	c.Nil(err)
	c.False(isIntoto2)
}

func TestControllerTestSuite(t *testing.T) {
	suite.Run(t, &controllerTestSuite{})
}
@ -37,22 +37,22 @@ type IndexProcessor struct {
|
|||
}
|
||||
|
||||
// AbstractMetadata abstracts metadata of artifact
|
||||
func (m *IndexProcessor) AbstractMetadata(ctx context.Context, artifact *artifact.Artifact, content []byte) error {
|
||||
func (m *IndexProcessor) AbstractMetadata(_ context.Context, _ *artifact.Artifact, _ []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// AbstractAddition abstracts the addition of artifact
|
||||
func (m *IndexProcessor) AbstractAddition(ctx context.Context, artifact *artifact.Artifact, addition string) (*processor.Addition, error) {
|
||||
func (m *IndexProcessor) AbstractAddition(_ context.Context, _ *artifact.Artifact, addition string) (*processor.Addition, error) {
|
||||
return nil, errors.New(nil).WithCode(errors.BadRequestCode).
|
||||
WithMessage("addition %s isn't supported", addition)
|
||||
}
|
||||
|
||||
// GetArtifactType returns the artifact type
|
||||
func (m *IndexProcessor) GetArtifactType(ctx context.Context, artifact *artifact.Artifact) string {
|
||||
func (m *IndexProcessor) GetArtifactType(_ context.Context, _ *artifact.Artifact) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// ListAdditionTypes returns the supported addition types
|
||||
func (m *IndexProcessor) ListAdditionTypes(ctx context.Context, artifact *artifact.Artifact) []string {
|
||||
func (m *IndexProcessor) ListAdditionTypes(_ context.Context, _ *artifact.Artifact) []string {
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -64,23 +64,23 @@ func (m *ManifestProcessor) AbstractMetadata(ctx context.Context, artifact *arti
|
|||
}
|
||||
|
||||
// AbstractAddition abstracts the addition of artifact
|
||||
func (m *ManifestProcessor) AbstractAddition(ctx context.Context, artifact *artifact.Artifact, addition string) (*processor.Addition, error) {
|
||||
func (m *ManifestProcessor) AbstractAddition(_ context.Context, _ *artifact.Artifact, addition string) (*processor.Addition, error) {
|
||||
return nil, errors.New(nil).WithCode(errors.BadRequestCode).
|
||||
WithMessage("addition %s isn't supported", addition)
|
||||
}
|
||||
|
||||
// GetArtifactType returns the artifact type
|
||||
func (m *ManifestProcessor) GetArtifactType(ctx context.Context, artifact *artifact.Artifact) string {
|
||||
func (m *ManifestProcessor) GetArtifactType(_ context.Context, _ *artifact.Artifact) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// ListAdditionTypes returns the supported addition types
|
||||
func (m *ManifestProcessor) ListAdditionTypes(ctx context.Context, artifact *artifact.Artifact) []string {
|
||||
func (m *ManifestProcessor) ListAdditionTypes(_ context.Context, _ *artifact.Artifact) []string {
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnmarshalConfig unmarshal the config blob of the artifact into the specified object "v"
|
||||
func (m *ManifestProcessor) UnmarshalConfig(ctx context.Context, repository string, manifest []byte, v interface{}) error {
|
||||
func (m *ManifestProcessor) UnmarshalConfig(_ context.Context, repository string, manifest []byte, v interface{}) error {
|
||||
// unmarshal manifest
|
||||
mani := &v1.Manifest{}
|
||||
if err := json.Unmarshal(manifest, mani); err != nil {
|
||||
|
|
|
@ -58,7 +58,7 @@ type processor struct {
|
|||
chartOperator chart.Operator
|
||||
}
|
||||
|
||||
func (p *processor) AbstractAddition(ctx context.Context, artifact *artifact.Artifact, addition string) (*ps.Addition, error) {
|
||||
func (p *processor) AbstractAddition(_ context.Context, artifact *artifact.Artifact, addition string) (*ps.Addition, error) {
|
||||
if addition != AdditionTypeValues && addition != AdditionTypeReadme && addition != AdditionTypeDependencies {
|
||||
return nil, errors.New(nil).WithCode(errors.BadRequestCode).
|
||||
WithMessage("addition %s isn't supported for %s", addition, ArtifactTypeChart)
|
||||
|
@ -85,11 +85,11 @@ func (p *processor) AbstractAddition(ctx context.Context, artifact *artifact.Art
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer blob.Close()
|
||||
content, err := io.ReadAll(blob)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blob.Close()
|
||||
chartDetails, err := p.chartOperator.GetDetails(content)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -122,10 +122,10 @@ func (p *processor) AbstractAddition(ctx context.Context, artifact *artifact.Art
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
func (p *processor) GetArtifactType(ctx context.Context, artifact *artifact.Artifact) string {
|
||||
func (p *processor) GetArtifactType(_ context.Context, _ *artifact.Artifact) string {
|
||||
return ArtifactTypeChart
|
||||
}
|
||||
|
||||
func (p *processor) ListAdditionTypes(ctx context.Context, artifact *artifact.Artifact) []string {
|
||||
func (p *processor) ListAdditionTypes(_ context.Context, _ *artifact.Artifact) []string {
|
||||
return []string{AdditionTypeValues, AdditionTypeReadme, AdditionTypeDependencies}
|
||||
}
|
||||
|
|
|
@ -45,7 +45,7 @@ type processor struct {
|
|||
manifestProcessor *base.ManifestProcessor
|
||||
}
|
||||
|
||||
func (p *processor) AbstractMetadata(ctx context.Context, art *artifact.Artifact, manifest []byte) error {
|
||||
func (p *processor) AbstractMetadata(ctx context.Context, art *artifact.Artifact, _ []byte) error {
|
||||
cfgManiDgt := ""
|
||||
// try to get the digest of the manifest that the config layer is referenced by
|
||||
for _, reference := range art.References {
|
||||
|
@ -72,6 +72,6 @@ func (p *processor) AbstractMetadata(ctx context.Context, art *artifact.Artifact
|
|||
return p.manifestProcessor.AbstractMetadata(ctx, art, payload)
|
||||
}
|
||||
|
||||
func (p *processor) GetArtifactType(ctx context.Context, artifact *artifact.Artifact) string {
|
||||
func (p *processor) GetArtifactType(_ context.Context, _ *artifact.Artifact) string {
|
||||
return ArtifactTypeCNAB
|
||||
}
|
||||
|
|
|
@ -48,7 +48,7 @@ type defaultProcessor struct {
|
|||
regCli registry.Client
|
||||
}
|
||||
|
||||
func (d *defaultProcessor) GetArtifactType(ctx context.Context, artifact *artifact.Artifact) string {
|
||||
func (d *defaultProcessor) GetArtifactType(_ context.Context, artifact *artifact.Artifact) string {
|
||||
// try to parse the type from the media type
|
||||
strs := artifactTypeRegExp.FindStringSubmatch(artifact.MediaType)
|
||||
if len(strs) == 2 {
|
||||
|
@ -57,7 +57,7 @@ func (d *defaultProcessor) GetArtifactType(ctx context.Context, artifact *artifa
|
|||
// can not get the artifact type from the media type, return unknown
|
||||
return ArtifactTypeUnknown
|
||||
}
|
||||
func (d *defaultProcessor) ListAdditionTypes(ctx context.Context, artifact *artifact.Artifact) []string {
|
||||
func (d *defaultProcessor) ListAdditionTypes(_ context.Context, _ *artifact.Artifact) []string {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -127,7 +127,7 @@ func (d *defaultProcessor) AbstractMetadata(ctx context.Context, artifact *artif
|
|||
return nil
|
||||
}
|
||||
|
||||
func (d *defaultProcessor) AbstractAddition(ctx context.Context, artifact *artifact.Artifact, addition string) (*Addition, error) {
|
||||
func (d *defaultProcessor) AbstractAddition(_ context.Context, artifact *artifact.Artifact, _ string) (*Addition, error) {
|
||||
// Addition not support for user-defined artifact yet.
|
||||
// It will be support in the future.
|
||||
// return error directly
|
||||
|
|
|
@ -117,7 +117,31 @@ var (
|
|||
}
|
||||
]
|
||||
}`
|
||||
v2ManifestWithUnknownConfig = `{
|
||||
OCIManifestWithUnknownJsonConfig = `{
|
||||
"schemaVersion": 2,
|
||||
"mediaType": "application/vnd.oci.image.manifest.v1+json",
|
||||
"config": {
|
||||
"mediaType": "application/vnd.exmaple.config.v1+json",
|
||||
"digest": "sha256:48ef4a53c0770222d9752cd0588431dbda54667046208c79804e34c15c1579cd",
|
||||
"size": 129
|
||||
},
|
||||
"layers": [
|
||||
{
|
||||
"mediaType": "application/vnd.example.data.v1.tar+gzip",
|
||||
"digest": "sha256:e258d248fda94c63753607f7c4494ee0fcbe92f1a76bfdac795c9d84101eb317",
|
||||
"size": 1234
|
||||
}
|
||||
],
|
||||
"annotations": {
|
||||
"com.example.key1": "value1"
|
||||
}
|
||||
}`
|
||||
UnknownJsonConfig = `{
|
||||
"author": "yminer",
|
||||
"architecture": "amd64",
|
||||
"selfdefined": "true"
|
||||
}`
|
||||
OCIManifestWithUnknownConfig = `{
|
||||
"schemaVersion": 2,
|
||||
"mediaType": "application/vnd.oci.image.manifest.v1+json",
|
||||
"config": {
|
||||
|
@ -141,7 +165,30 @@ var (
|
|||
"newUnspecifiedField": null
|
||||
}
|
||||
}`
|
||||
unknownConfig = `{NHL Peanut Butter on my NHL bagel}`
|
||||
UnknownConfig = `{NHL Peanut Butter on my NHL bagel}`
|
||||
|
||||
OCIManifestWithEmptyConfig = `{
|
||||
"schemaVersion": 2,
|
||||
"mediaType": "application/vnd.oci.image.manifest.v1+json",
|
||||
"artifactType": "application/vnd.example+type",
|
||||
"config": {
|
||||
"mediaType": "application/vnd.oci.empty.v1+json",
|
||||
"digest": "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a",
|
||||
"size": 2
|
||||
},
|
||||
"layers": [
|
||||
{
|
||||
"mediaType": "application/vnd.example+type",
|
||||
"digest": "sha256:e258d248fda94c63753607f7c4494ee0fcbe92f1a76bfdac795c9d84101eb317",
|
||||
"size": 1234
|
||||
}
|
||||
],
|
||||
"annotations": {
|
||||
"oci.opencontainers.image.created": "2023-01-02T03:04:05Z",
|
||||
"com.example.data": "payload"
|
||||
}
|
||||
}`
|
||||
emptyConfig = `{}`
|
||||
)
|
||||
|
||||
type defaultProcessorTestSuite struct {
|
||||
|
@ -190,6 +237,12 @@ func (d *defaultProcessorTestSuite) TestGetArtifactType() {
|
|||
typee = processor.GetArtifactType(nil, art)
|
||||
d.Equal("IMAGE", typee)
|
||||
|
||||
mediaType = "application/vnd.example.config.v1+json"
|
||||
art = &artifact.Artifact{MediaType: mediaType}
|
||||
processor = &defaultProcessor{}
|
||||
typee = processor.GetArtifactType(nil, art)
|
||||
d.Equal(ArtifactTypeUnknown, typee)
|
||||
|
||||
mediaType = "application/vnd.cncf.helm.chart.config.v1+json"
|
||||
art = &artifact.Artifact{MediaType: mediaType}
|
||||
processor = &defaultProcessor{}
|
||||
|
@ -229,19 +282,53 @@ func (d *defaultProcessorTestSuite) TestAbstractMetadata() {
|
|||
d.Len(art.ExtraAttrs, 12)
|
||||
}
|
||||
|
||||
func (d *defaultProcessorTestSuite) TestAbstractMetadataWithUnknownConfig() {
|
||||
manifest, _, err := distribution.UnmarshalManifest(v1.MediaTypeImageManifest, []byte(v2ManifestWithUnknownConfig))
|
||||
func (d *defaultProcessorTestSuite) TestAbstractMetadataOfOCIManifesttWithUnknownJsonConfig() {
|
||||
manifest, _, err := distribution.UnmarshalManifest(v1.MediaTypeImageManifest, []byte(OCIManifestWithUnknownJsonConfig))
|
||||
d.Require().Nil(err)
|
||||
manifestMediaType, content, err := manifest.Payload()
|
||||
d.Require().Nil(err)
|
||||
|
||||
configBlob := io.NopCloser(strings.NewReader(unknownConfig))
|
||||
d.regCli.On("PullBlob", mock.Anything, mock.Anything).Return(int64(0), configBlob, nil)
|
||||
art := &artifact.Artifact{ManifestMediaType: manifestMediaType}
|
||||
err = d.processor.AbstractMetadata(nil, art, content)
|
||||
configBlob := io.NopCloser(strings.NewReader(UnknownJsonConfig))
|
||||
metadata := map[string]interface{}{}
|
||||
err = json.NewDecoder(configBlob).Decode(&metadata)
|
||||
d.Require().Nil(err)
|
||||
|
||||
art := &artifact.Artifact{ManifestMediaType: manifestMediaType, MediaType: "application/vnd.example.config.v1+json"}
|
||||
|
||||
d.regCli.On("PullBlob", mock.Anything, mock.Anything).Return(int64(129), configBlob, nil)
|
||||
d.parser.On("Parse", context.TODO(), mock.AnythingOfType("*artifact.Artifact"), mock.AnythingOfType("[]byte")).Return(nil)
|
||||
err = d.processor.AbstractMetadata(context.TODO(), art, content)
|
||||
d.Require().Nil(err)
|
||||
d.Len(art.ExtraAttrs, 0)
|
||||
d.Len(unknownConfig, 35)
|
||||
d.NotEqual(art.ExtraAttrs, len(metadata))
|
||||
|
||||
}
|
||||
|
||||
func (d *defaultProcessorTestSuite) TestAbstractMetadataWithUnknownConfig() {
|
||||
manifest, _, err := distribution.UnmarshalManifest(v1.MediaTypeImageManifest, []byte(OCIManifestWithUnknownConfig))
|
||||
d.Require().Nil(err)
|
||||
manifestMediaType, content, err := manifest.Payload()
|
||||
d.Require().Nil(err)
|
||||
|
||||
configBlob := io.NopCloser(strings.NewReader(UnknownConfig))
|
||||
d.regCli.On("PullBlob", mock.Anything, mock.Anything).Return(int64(0), configBlob, nil)
|
||||
art := &artifact.Artifact{ManifestMediaType: manifestMediaType, MediaType: "application/vnd.nhl.peanut.butter.bagel"}
|
||||
err = d.processor.AbstractMetadata(context.TODO(), art, content)
|
||||
d.Require().Nil(err)
|
||||
d.Len(art.ExtraAttrs, 0)
|
||||
}
|
||||
|
||||
func (d *defaultProcessorTestSuite) TestAbstractMetadataWithEmptyConfig() {
|
||||
manifest, _, err := distribution.UnmarshalManifest(v1.MediaTypeImageManifest, []byte(OCIManifestWithEmptyConfig))
|
||||
d.Require().Nil(err)
|
||||
manifestMediaType, content, err := manifest.Payload()
|
||||
d.Require().Nil(err)
|
||||
|
||||
art := &artifact.Artifact{ManifestMediaType: manifestMediaType, MediaType: "application/vnd.oci.empty.v1+json"}
|
||||
err = d.processor.AbstractMetadata(context.TODO(), art, content)
|
||||
d.Assert().Equal(0, len(art.ExtraAttrs))
|
||||
d.Assert().Equal(2, len(emptyConfig))
|
||||
d.Require().Nil(err)
|
||||
}
|
||||
|
||||
func TestDefaultProcessorTestSuite(t *testing.T) {
|
||||
|
|
|
@ -44,6 +44,6 @@ type indexProcessor struct {
|
|||
*base.IndexProcessor
|
||||
}
|
||||
|
||||
func (i *indexProcessor) GetArtifactType(ctx context.Context, artifact *artifact.Artifact) string {
|
||||
func (i *indexProcessor) GetArtifactType(_ context.Context, _ *artifact.Artifact) string {
|
||||
return ArtifactTypeImage
|
||||
}
|
||||
|
|
|
@ -38,7 +38,7 @@ func init() {
|
|||
type manifestV1Processor struct {
|
||||
}
|
||||
|
||||
func (m *manifestV1Processor) AbstractMetadata(ctx context.Context, artifact *artifact.Artifact, manifest []byte) error {
|
||||
func (m *manifestV1Processor) AbstractMetadata(_ context.Context, artifact *artifact.Artifact, manifest []byte) error {
|
||||
mani := &schema1.Manifest{}
|
||||
if err := json.Unmarshal(manifest, mani); err != nil {
|
||||
return err
|
||||
|
@ -50,15 +50,15 @@ func (m *manifestV1Processor) AbstractMetadata(ctx context.Context, artifact *ar
|
|||
return nil
|
||||
}
|
||||
|
||||
func (m *manifestV1Processor) AbstractAddition(ctx context.Context, artifact *artifact.Artifact, addition string) (*processor.Addition, error) {
|
||||
func (m *manifestV1Processor) AbstractAddition(_ context.Context, _ *artifact.Artifact, addition string) (*processor.Addition, error) {
|
||||
return nil, errors.New(nil).WithCode(errors.BadRequestCode).
|
||||
WithMessage("addition %s isn't supported for %s(manifest version 1)", addition, ArtifactTypeImage)
|
||||
}
|
||||
|
||||
func (m *manifestV1Processor) GetArtifactType(ctx context.Context, artifact *artifact.Artifact) string {
|
||||
func (m *manifestV1Processor) GetArtifactType(_ context.Context, _ *artifact.Artifact) string {
|
||||
return ArtifactTypeImage
|
||||
}
|
||||
|
||||
func (m *manifestV1Processor) ListAdditionTypes(ctx context.Context, artifact *artifact.Artifact) []string {
|
||||
func (m *manifestV1Processor) ListAdditionTypes(_ context.Context, _ *artifact.Artifact) []string {
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -112,10 +112,10 @@ func (m *manifestV2Processor) AbstractAddition(ctx context.Context, artifact *ar
|
|||
}, nil
|
||||
}
|
||||
|
||||
func (m *manifestV2Processor) GetArtifactType(ctx context.Context, artifact *artifact.Artifact) string {
|
||||
func (m *manifestV2Processor) GetArtifactType(_ context.Context, _ *artifact.Artifact) string {
|
||||
return ArtifactTypeImage
|
||||
}
|
||||
|
||||
func (m *manifestV2Processor) ListAdditionTypes(ctx context.Context, artifact *artifact.Artifact) []string {
|
||||
func (m *manifestV2Processor) ListAdditionTypes(_ context.Context, _ *artifact.Artifact) []string {
|
||||
return []string{AdditionTypeBuildHistory}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,89 @@
|
|||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package sbom
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
|
||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
||||
"github.com/goharbor/harbor/src/controller/artifact/processor"
|
||||
"github.com/goharbor/harbor/src/controller/artifact/processor/base"
|
||||
"github.com/goharbor/harbor/src/lib/errors"
|
||||
"github.com/goharbor/harbor/src/lib/log"
|
||||
"github.com/goharbor/harbor/src/pkg/artifact"
|
||||
)
|
||||
|
||||
const (
|
||||
// ArtifactTypeSBOM is the artifact type for SBOM, it's scope is only used in the processor
|
||||
ArtifactTypeSBOM = "SBOM"
|
||||
// processorMediaType is the media type for SBOM, it's scope is only used to register the processor
|
||||
processorMediaType = "application/vnd.goharbor.harbor.sbom.v1"
|
||||
)
|
||||
|
||||
func init() {
|
||||
pc := &Processor{}
|
||||
pc.ManifestProcessor = base.NewManifestProcessor()
|
||||
if err := processor.Register(pc, processorMediaType); err != nil {
|
||||
log.Errorf("failed to register processor for media type %s: %v", processorMediaType, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Processor is the processor for SBOM
|
||||
type Processor struct {
|
||||
*base.ManifestProcessor
|
||||
}
|
||||
|
||||
// AbstractAddition returns the addition for SBOM
|
||||
func (m *Processor) AbstractAddition(_ context.Context, art *artifact.Artifact, _ string) (*processor.Addition, error) {
|
||||
man, _, err := m.RegCli.PullManifest(art.RepositoryName, art.Digest)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to pull manifest")
|
||||
}
|
||||
_, payload, err := man.Payload()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get payload")
|
||||
}
|
||||
manifest := &v1.Manifest{}
|
||||
if err := json.Unmarshal(payload, manifest); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// SBOM artifact should only have one layer
|
||||
if len(manifest.Layers) != 1 {
|
||||
return nil, errors.New(nil).WithCode(errors.NotFoundCode).WithMessage("The sbom is not found")
|
||||
}
|
||||
layerDgst := manifest.Layers[0].Digest.String()
|
||||
_, blob, err := m.RegCli.PullBlob(art.RepositoryName, layerDgst)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to pull the blob")
|
||||
}
|
||||
defer blob.Close()
|
||||
content, err := io.ReadAll(blob)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &processor.Addition{
|
||||
Content: content,
|
||||
ContentType: processorMediaType,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GetArtifactType the artifact type is used to display the artifact type in the UI
|
||||
func (m *Processor) GetArtifactType(_ context.Context, _ *artifact.Artifact) string {
|
||||
return ArtifactTypeSBOM
|
||||
}
|
|
@ -0,0 +1,166 @@
|
|||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package sbom
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/distribution"
|
||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/goharbor/harbor/src/controller/artifact/processor/base"
|
||||
"github.com/goharbor/harbor/src/lib/errors"
|
||||
"github.com/goharbor/harbor/src/pkg/artifact"
|
||||
"github.com/goharbor/harbor/src/testing/pkg/registry"
|
||||
)
|
||||
|
||||
type SBOMProcessorTestSuite struct {
|
||||
suite.Suite
|
||||
processor *Processor
|
||||
regCli *registry.Client
|
||||
}
|
||||
|
||||
func (suite *SBOMProcessorTestSuite) SetupSuite() {
|
||||
suite.regCli = ®istry.Client{}
|
||||
suite.processor = &Processor{
|
||||
&base.ManifestProcessor{
|
||||
RegCli: suite.regCli,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *SBOMProcessorTestSuite) TearDownSuite() {
|
||||
}
|
||||
|
||||
func (suite *SBOMProcessorTestSuite) TestAbstractAdditionNormal() {
|
||||
manContent := `{
|
||||
"schemaVersion": 2,
|
||||
"config": {
|
||||
"mediaType": "application/vnd.oci.image.config.v1+json",
|
||||
"digest": "sha256:e91b9dfcbbb3b88bac94726f276b89de46e4460b55f6e6d6f876e666b150ec5b",
|
||||
"size": 498
|
||||
},
|
||||
"layers": [
|
||||
{
|
||||
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
||||
"size": 32654,
|
||||
"digest": "sha256:abc"
|
||||
}]
|
||||
}`
|
||||
sbomContent := "this is a sbom content"
|
||||
reader := strings.NewReader(sbomContent)
|
||||
blobReader := io.NopCloser(reader)
|
||||
mani, _, err := distribution.UnmarshalManifest(v1.MediaTypeImageManifest, []byte(manContent))
|
||||
suite.Require().NoError(err)
|
||||
suite.regCli.On("PullManifest", mock.Anything, mock.Anything).Return(mani, "sha256:123", nil).Once()
|
||||
suite.regCli.On("PullBlob", mock.Anything, mock.Anything).Return(int64(123), blobReader, nil).Once()
|
||||
addition, err := suite.processor.AbstractAddition(context.Background(), &artifact.Artifact{RepositoryName: "repo", Digest: "digest"}, "sbom")
|
||||
suite.Nil(err)
|
||||
suite.Equal(sbomContent, string(addition.Content))
|
||||
}
|
||||
|
||||
func (suite *SBOMProcessorTestSuite) TestAbstractAdditionMultiLayer() {
|
||||
manContent := `{
|
||||
"schemaVersion": 2,
|
||||
"config": {
|
||||
"mediaType": "application/vnd.oci.image.config.v1+json",
|
||||
"digest": "sha256:e91b9dfcbbb3b88bac94726f276b89de46e4460b55f6e6d6f876e666b150ec5b",
|
||||
"size": 498
|
||||
},
|
||||
"layers": [
|
||||
{
|
||||
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
||||
"size": 32654,
|
||||
"digest": "sha256:abc"
|
||||
},
|
||||
{
|
||||
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
||||
"size": 843,
|
||||
"digest": "sha256:def"
|
||||
},
|
||||
{
|
||||
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
||||
"size": 531,
|
||||
"digest": "sha256:123"
|
||||
}
|
||||
]
|
||||
}`
|
||||
mani, _, err := distribution.UnmarshalManifest(v1.MediaTypeImageManifest, []byte(manContent))
|
||||
suite.Require().NoError(err)
|
||||
suite.regCli.On("PullManifest", mock.Anything, mock.Anything).Return(mani, "sha256:123", nil).Once()
|
||||
_, err = suite.processor.AbstractAddition(context.Background(), &artifact.Artifact{RepositoryName: "repo", Digest: "digest"}, "sbom")
|
||||
suite.NotNil(err)
|
||||
}
|
||||
|
||||
func (suite *SBOMProcessorTestSuite) TestAbstractAdditionPullBlobError() {
|
||||
manContent := `{
|
||||
"schemaVersion": 2,
|
||||
"config": {
|
||||
"mediaType": "application/vnd.oci.image.config.v1+json",
|
||||
"digest": "sha256:e91b9dfcbbb3b88bac94726f276b89de46e4460b55f6e6d6f876e666b150ec5b",
|
||||
"size": 498
|
||||
},
|
||||
"layers": [
|
||||
{
|
||||
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
||||
"size": 32654,
|
||||
"digest": "sha256:abc"
|
||||
}
|
||||
]
|
||||
}`
|
||||
mani, _, err := distribution.UnmarshalManifest(v1.MediaTypeImageManifest, []byte(manContent))
|
||||
suite.Require().NoError(err)
|
||||
suite.regCli.On("PullManifest", mock.Anything, mock.Anything).Return(mani, "sha256:123", nil).Once()
|
||||
suite.regCli.On("PullBlob", mock.Anything, mock.Anything).Return(int64(123), nil, errors.NotFoundError(fmt.Errorf("not found"))).Once()
|
||||
addition, err := suite.processor.AbstractAddition(context.Background(), &artifact.Artifact{RepositoryName: "repo", Digest: "digest"}, "sbom")
|
||||
suite.NotNil(err)
|
||||
suite.Nil(addition)
|
||||
}
|
||||
func (suite *SBOMProcessorTestSuite) TestAbstractAdditionNoSBOMLayer() {
|
||||
manContent := `{
|
||||
"schemaVersion": 2,
|
||||
"config": {
|
||||
"mediaType": "application/vnd.oci.image.config.v1+json",
|
||||
"digest": "sha256:e91b9dfcbbb3b88bac94726f276b89de46e4460b55f6e6d6f876e666b150ec5b",
|
||||
"size": 498
|
||||
}
|
||||
}`
|
||||
mani, _, err := distribution.UnmarshalManifest(v1.MediaTypeImageManifest, []byte(manContent))
|
||||
suite.Require().NoError(err)
|
||||
suite.regCli.On("PullManifest", mock.Anything, mock.Anything).Return(mani, "sha256:123", nil).Once()
|
||||
_, err = suite.processor.AbstractAddition(context.Background(), &artifact.Artifact{RepositoryName: "repo", Digest: "digest"}, "sbom")
|
||||
suite.NotNil(err)
|
||||
}
|
||||
|
||||
func (suite *SBOMProcessorTestSuite) TestAbstractAdditionPullManifestError() {
|
||||
suite.regCli.On("PullManifest", mock.Anything, mock.Anything).Return(nil, "sha256:123", errors.NotFoundError(fmt.Errorf("not found"))).Once()
|
||||
_, err := suite.processor.AbstractAddition(context.Background(), &artifact.Artifact{RepositoryName: "repo", Digest: "digest"}, "sbom")
|
||||
suite.NotNil(err)
|
||||
|
||||
}
|
||||
|
||||
func (suite *SBOMProcessorTestSuite) TestGetArtifactType() {
|
||||
suite.Equal(ArtifactTypeSBOM, suite.processor.GetArtifactType(context.Background(), &artifact.Artifact{}))
|
||||
}
|
||||
|
||||
func TestSBOMProcessorTestSuite(t *testing.T) {
|
||||
suite.Run(t, &SBOMProcessorTestSuite{})
|
||||
}
|
|
@ -128,10 +128,10 @@ func (m *Processor) AbstractAddition(ctx context.Context, artifact *artifact.Art
|
|||
}, nil
|
||||
}
|
||||
|
||||
func (m *Processor) GetArtifactType(ctx context.Context, artifact *artifact.Artifact) string {
|
||||
func (m *Processor) GetArtifactType(_ context.Context, _ *artifact.Artifact) string {
|
||||
return ArtifactTypeWASM
|
||||
}
|
||||
|
||||
func (m *Processor) ListAdditionTypes(ctx context.Context, artifact *artifact.Artifact) []string {
|
||||
func (m *Processor) ListAdditionTypes(_ context.Context, _ *artifact.Artifact) []string {
|
||||
return []string{AdditionTypeBuildHistory}
|
||||
}
|
||||
|
|
|
@ -169,7 +169,7 @@ func verifySkipAuditLogCfg(ctx context.Context, cfgs map[string]interface{}, mgr
|
|||
}
|
||||
|
||||
// verifyValueLengthCfg verifies the cfgs which need to check the value max length to align with frontend.
|
||||
func verifyValueLengthCfg(ctx context.Context, cfgs map[string]interface{}) error {
|
||||
func verifyValueLengthCfg(_ context.Context, cfgs map[string]interface{}) error {
|
||||
maxValue := maxValueLimitedByLength(common.UIMaxLengthLimitedOfNumber)
|
||||
validateCfgs := []string{
|
||||
common.TokenExpiration,
|
||||
|
|
|
@ -24,6 +24,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/goharbor/harbor/src/controller/artifact"
|
||||
"github.com/goharbor/harbor/src/controller/artifact/processor/sbom"
|
||||
"github.com/goharbor/harbor/src/controller/event"
|
||||
"github.com/goharbor/harbor/src/controller/event/operator"
|
||||
"github.com/goharbor/harbor/src/controller/repository"
|
||||
|
@ -36,6 +37,7 @@ import (
|
|||
"github.com/goharbor/harbor/src/pkg"
|
||||
pkgArt "github.com/goharbor/harbor/src/pkg/artifact"
|
||||
"github.com/goharbor/harbor/src/pkg/scan/report"
|
||||
v1 "github.com/goharbor/harbor/src/pkg/scan/rest/v1"
|
||||
"github.com/goharbor/harbor/src/pkg/task"
|
||||
)
|
||||
|
||||
|
@ -159,7 +161,7 @@ func (a *ArtifactEventHandler) onPull(ctx context.Context, event *event.Artifact
|
|||
return nil
|
||||
}
|
||||
|
||||
func (a *ArtifactEventHandler) updatePullTimeInCache(ctx context.Context, event *event.ArtifactEvent) {
|
||||
func (a *ArtifactEventHandler) updatePullTimeInCache(_ context.Context, event *event.ArtifactEvent) {
|
||||
var tagName string
|
||||
if len(event.Tags) != 0 {
|
||||
tagName = event.Tags[0]
|
||||
|
@ -173,7 +175,7 @@ func (a *ArtifactEventHandler) updatePullTimeInCache(ctx context.Context, event
|
|||
a.pullTimeStore[key] = time.Now()
|
||||
}
|
||||
|
||||
func (a *ArtifactEventHandler) addPullCountInCache(ctx context.Context, event *event.ArtifactEvent) {
|
||||
func (a *ArtifactEventHandler) addPullCountInCache(_ context.Context, event *event.ArtifactEvent) {
|
||||
a.pullCountLock.Lock()
|
||||
defer a.pullCountLock.Unlock()
|
||||
|
||||
|
@ -258,6 +260,11 @@ func (a *ArtifactEventHandler) onPush(ctx context.Context, event *event.Artifact
|
|||
if err := autoScan(ctx, &artifact.Artifact{Artifact: *event.Artifact}, event.Tags...); err != nil {
|
||||
log.Errorf("scan artifact %s@%s failed, error: %v", event.Artifact.RepositoryName, event.Artifact.Digest, err)
|
||||
}
|
||||
|
||||
log.Debugf("auto generate sbom is triggered for artifact event %+v", event)
|
||||
if err := autoGenSBOM(ctx, &artifact.Artifact{Artifact: *event.Artifact}); err != nil {
|
||||
log.Errorf("generate sbom for artifact %s@%s failed, error: %v", event.Artifact.RepositoryName, event.Artifact.Digest, err)
|
||||
}
|
||||
}()
|
||||
|
||||
return nil
|
||||
|
@ -314,6 +321,11 @@ func (a *ArtifactEventHandler) onDelete(ctx context.Context, event *event.Artifa
|
|||
log.Errorf("failed to delete scan reports of artifact %v, error: %v", unrefDigests, err)
|
||||
}
|
||||
|
||||
if event.Artifact.Type == sbom.ArtifactTypeSBOM && len(event.Artifact.Digest) > 0 {
|
||||
if err := reportMgr.DeleteByExtraAttr(ctx, v1.MimeTypeSBOMReport, "sbom_digest", event.Artifact.Digest); err != nil {
|
||||
log.Errorf("failed to delete scan reports of with sbom digest %v, error: %v", event.Artifact.Digest, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@ -20,7 +20,9 @@ import (
|
|||
"github.com/goharbor/harbor/src/controller/artifact"
|
||||
"github.com/goharbor/harbor/src/controller/project"
|
||||
"github.com/goharbor/harbor/src/controller/scan"
|
||||
"github.com/goharbor/harbor/src/lib/log"
|
||||
"github.com/goharbor/harbor/src/lib/orm"
|
||||
v1 "github.com/goharbor/harbor/src/pkg/scan/rest/v1"
|
||||
)
|
||||
|
||||
// autoScan scan artifact when the project of the artifact enable auto scan
|
||||
|
@ -37,9 +39,26 @@ func autoScan(ctx context.Context, a *artifact.Artifact, tags ...string) error {
|
|||
return orm.WithTransaction(func(ctx context.Context) error {
|
||||
options := []scan.Option{}
|
||||
if len(tags) > 0 {
|
||||
options = append(options, scan.WithTag(tags[0]))
|
||||
options = append(options, scan.WithTag(tags[0]), scan.WithFromEvent(true))
|
||||
}
|
||||
|
||||
return scan.DefaultController.Scan(ctx, a, options...)
|
||||
})(orm.SetTransactionOpNameToContext(ctx, "tx-auto-scan"))
|
||||
}
|
||||
|
||||
func autoGenSBOM(ctx context.Context, a *artifact.Artifact) error {
|
||||
proj, err := project.Ctl.Get(ctx, a.ProjectID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !proj.AutoSBOMGen() {
|
||||
return nil
|
||||
}
|
||||
// transaction here to work with the image index
|
||||
return orm.WithTransaction(func(ctx context.Context) error {
|
||||
options := []scan.Option{}
|
||||
options = append(options, scan.WithScanType(v1.ScanTypeSbom), scan.WithFromEvent(true))
|
||||
log.Debugf("sbom scan controller artifact %+v, options %+v", a, options)
|
||||
return scan.DefaultController.Scan(ctx, a, options...)
|
||||
})(orm.SetTransactionOpNameToContext(ctx, "tx-auto-gen-sbom"))
|
||||
}
|
||||
|
|
|
@ -95,6 +95,34 @@ func (suite *AutoScanTestSuite) TestAutoScan() {
|
|||
suite.Nil(autoScan(ctx, art))
|
||||
}
|
||||
|
||||
func (suite *AutoScanTestSuite) TestAutoScanSBOM() {
|
||||
mock.OnAnything(suite.projectController, "Get").Return(&proModels.Project{
|
||||
Metadata: map[string]string{
|
||||
proModels.ProMetaAutoSBOMGen: "true",
|
||||
},
|
||||
}, nil)
|
||||
suite.scanController.On("Scan", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()
|
||||
ctx := orm.NewContext(nil, &ormtesting.FakeOrmer{})
|
||||
art := &artifact.Artifact{}
|
||||
|
||||
suite.Nil(autoGenSBOM(ctx, art))
|
||||
}
|
||||
|
||||
func (suite *AutoScanTestSuite) TestAutoScanSBOMFalse() {
|
||||
mock.OnAnything(suite.projectController, "Get").Return(&proModels.Project{
|
||||
Metadata: map[string]string{
|
||||
proModels.ProMetaAutoSBOMGen: "false",
|
||||
},
|
||||
}, nil)
|
||||
|
||||
suite.scanController.On("Scan", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()
|
||||
|
||||
ctx := orm.NewContext(nil, &ormtesting.FakeOrmer{})
|
||||
art := &artifact.Artifact{}
|
||||
|
||||
suite.Nil(autoGenSBOM(ctx, art))
|
||||
}
|
||||
|
||||
func (suite *AutoScanTestSuite) TestAutoScanFailed() {
|
||||
mock.OnAnything(suite.projectController, "Get").Return(&proModels.Project{
|
||||
Metadata: map[string]string{
|
||||
|
|
|
@@ -216,7 +216,9 @@ func constructReplicationPayload(ctx context.Context, event *event.ReplicationEv

func getMetadataFromResource(resource string) (namespace, nameAndTag string) {
	// Usually the resource format looks like 'library/busybox:v1', but it could be 'busybox:v1' in docker registry
	meta := strings.Split(resource, "/")
	// It could also be 'library/bitnami/fluentd:1.13.3-debian-10-r0', so split the resource into only 2 parts:
	// a possible namespace and an image name which may itself contain slashes, for example: bitnami/fluentd:1.13.3-debian-10-r0
	meta := strings.SplitN(resource, "/", 2)
	if len(meta) == 1 {
		return "", meta[0]
	}
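The switch from `strings.Split` to `strings.SplitN(resource, "/", 2)` is what keeps nested image names intact. A small, standalone sketch of the same parsing behavior (the function name is illustrative only):

```go
package main

import (
	"fmt"
	"strings"
)

// splitResource splits a replication resource into an optional namespace and the
// remaining name:tag, keeping any further slashes inside the name part.
func splitResource(resource string) (namespace, nameAndTag string) {
	meta := strings.SplitN(resource, "/", 2)
	if len(meta) == 1 {
		return "", meta[0]
	}
	return meta[0], meta[1]
}

func main() {
	fmt.Println(splitResource("busybox:v1"))                                  // "" busybox:v1
	fmt.Println(splitResource("library/busybox:v1"))                          // library busybox:v1
	fmt.Println(splitResource("library/bitnami/fluentd:1.13.3-debian-10-r0")) // library bitnami/fluentd:1.13.3-debian-10-r0
}
```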
@@ -146,3 +146,21 @@ func TestIsLocalRegistry(t *testing.T) {
	}
	assert.False(t, isLocalRegistry(reg2))
}

func TestReplicationHandler_ShortResourceName(t *testing.T) {
	namespace, resource := getMetadataFromResource("busybox:v1")
	assert.Equal(t, "", namespace)
	assert.Equal(t, "busybox:v1", resource)
}

func TestReplicationHandler_NormalResourceName(t *testing.T) {
	namespace, resource := getMetadataFromResource("library/busybox:v1")
	assert.Equal(t, "library", namespace)
	assert.Equal(t, "busybox:v1", resource)
}

func TestReplicationHandler_LongResourceName(t *testing.T) {
	namespace, resource := getMetadataFromResource("library/bitnami/fluentd:1.13.3-debian-10-r0")
	assert.Equal(t, "library", namespace)
	assert.Equal(t, "bitnami/fluentd:1.13.3-debian-10-r0", resource)
}
@ -21,6 +21,7 @@ import (
|
|||
"github.com/goharbor/harbor/src/controller/artifact"
|
||||
"github.com/goharbor/harbor/src/controller/event"
|
||||
"github.com/goharbor/harbor/src/controller/event/handler/util"
|
||||
eventModel "github.com/goharbor/harbor/src/controller/event/model"
|
||||
"github.com/goharbor/harbor/src/controller/project"
|
||||
"github.com/goharbor/harbor/src/controller/scan"
|
||||
"github.com/goharbor/harbor/src/lib/errors"
|
||||
|
@ -104,6 +105,9 @@ func constructScanImagePayload(ctx context.Context, event *event.ScanImageEvent,
|
|||
RepoFullName: event.Artifact.Repository,
|
||||
RepoType: repoType,
|
||||
},
|
||||
Scan: &eventModel.Scan{
|
||||
ScanType: event.ScanType,
|
||||
},
|
||||
},
|
||||
Operator: event.Operator,
|
||||
}
|
||||
|
@@ -138,17 +142,29 @@ func constructScanImagePayload(ctx context.Context, event *event.ScanImageEvent,
		time.Sleep(500 * time.Millisecond)
	}

	// Add scan overview
	summaries, err := scan.DefaultController.GetSummary(ctx, art, []string{v1.MimeTypeNativeReport, v1.MimeTypeGenericVulnerabilityReport})
	if err != nil {
		return nil, errors.Wrap(err, "construct scan payload")
	scanSummaries := map[string]interface{}{}
	if event.ScanType == v1.ScanTypeVulnerability {
		scanSummaries, err = scan.DefaultController.GetSummary(ctx, art, []string{v1.MimeTypeNativeReport, v1.MimeTypeGenericVulnerabilityReport})
		if err != nil {
			return nil, errors.Wrap(err, "construct scan payload")
		}
	}

	sbomOverview := map[string]interface{}{}
	if event.ScanType == v1.ScanTypeSbom {
		sbomOverview, err = scan.DefaultController.GetSummary(ctx, art, []string{v1.MimeTypeSBOMReport})
		if err != nil {
			return nil, errors.Wrap(err, "construct scan payload")
		}
	}

	// Add scan overview and sbom overview
	resource := &model.Resource{
		Tag:          event.Artifact.Tag,
		Digest:       event.Artifact.Digest,
		ResourceURL:  resURL,
		ScanOverview: summaries,
		ScanOverview: scanSummaries,
		SBOMOverview: sbomOverview,
	}
	payload.EventData.Resources = append(payload.EventData.Resources, resource)
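The hunk above chooses which report MIME types to summarize based on the scan type. A hedged sketch of that selection logic in isolation; the constant values below are placeholders, not the actual Harbor `v1` MIME type strings, and the scan type strings are assumed:

```go
package main

import "fmt"

// placeholder stand-ins for the v1 MIME type constants referenced above
const (
	mimeNativeReport      = "application/vnd.example.vuln.native+json"
	mimeGenericVulnReport = "application/vnd.example.vuln.generic+json"
	mimeSBOMReport        = "application/vnd.example.sbom+json"
)

// mimeTypesForScanType returns the report MIME types a webhook payload should summarize
// for a given scan type (assumed values: "vulnerability" and "sbom").
func mimeTypesForScanType(scanType string) []string {
	switch scanType {
	case "vulnerability":
		return []string{mimeNativeReport, mimeGenericVulnReport}
	case "sbom":
		return []string{mimeSBOMReport}
	default:
		return nil
	}
}

func main() {
	fmt.Println(mimeTypesForScanType("sbom"))
}
```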
@ -27,6 +27,7 @@ import (
|
|||
// ScanImageMetaData defines meta data of image scanning event
|
||||
type ScanImageMetaData struct {
|
||||
Artifact *v1.Artifact
|
||||
ScanType string
|
||||
Status string
|
||||
Operator string
|
||||
}
|
||||
|
@ -55,6 +56,7 @@ func (si *ScanImageMetaData) Resolve(evt *event.Event) error {
|
|||
Artifact: si.Artifact,
|
||||
OccurAt: time.Now(),
|
||||
Operator: si.Operator,
|
||||
ScanType: si.ScanType,
|
||||
}
|
||||
|
||||
evt.Topic = topic
|
||||
|
|
|
@ -74,3 +74,9 @@ type RetentionRule struct {
|
|||
// Selector attached to the rule for filtering scope (e.g: repositories or namespaces)
|
||||
ScopeSelectors map[string][]*rule.Selector `json:"scope_selectors,omitempty"`
|
||||
}
|
||||
|
||||
// Scan describes scan infos
|
||||
type Scan struct {
|
||||
// ScanType the scan type
|
||||
ScanType string `json:"scan_type,omitempty"`
|
||||
}
|
||||
|
|
|
@@ -159,7 +159,7 @@ func (p *PushArtifactEvent) ResolveToAuditLog() (*model.AuditLog, error) {
		ResourceType: "artifact"}

	if len(p.Tags) == 0 {
		auditLog.Resource = fmt.Sprintf("%s:%s",
		auditLog.Resource = fmt.Sprintf("%s@%s",
			p.Artifact.RepositoryName, p.Artifact.Digest)
	} else {
		auditLog.Resource = fmt.Sprintf("%s:%s",

@@ -188,7 +188,7 @@ func (p *PullArtifactEvent) ResolveToAuditLog() (*model.AuditLog, error) {
		ResourceType: "artifact"}

	if len(p.Tags) == 0 {
		auditLog.Resource = fmt.Sprintf("%s:%s",
		auditLog.Resource = fmt.Sprintf("%s@%s",
			p.Artifact.RepositoryName, p.Artifact.Digest)
	} else {
		auditLog.Resource = fmt.Sprintf("%s:%s",

@@ -222,7 +222,7 @@ func (d *DeleteArtifactEvent) ResolveToAuditLog() (*model.AuditLog, error) {
		Operation:    rbac.ActionDelete.String(),
		Username:     d.Operator,
		ResourceType: "artifact",
		Resource:     fmt.Sprintf("%s:%s", d.Artifact.RepositoryName, d.Artifact.Digest)}
		Resource:     fmt.Sprintf("%s@%s", d.Artifact.RepositoryName, d.Artifact.Digest)}
	return auditLog, nil
}
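These hunks switch the untagged-artifact audit-log resource from `repo:digest` to the conventional `repo@digest` form. A tiny sketch of that formatting rule (the helper name is illustrative only):

```go
package main

import "fmt"

// auditResource formats the audit-log resource: repo:tag when a tag is known,
// otherwise repo@digest, matching the change shown above.
func auditResource(repo, tag, digest string) string {
	if tag != "" {
		return fmt.Sprintf("%s:%s", repo, tag)
	}
	return fmt.Sprintf("%s@%s", repo, digest)
}

func main() {
	fmt.Println(auditResource("library/hello-world", "", "sha256:aabbcc"))       // library/hello-world@sha256:aabbcc
	fmt.Println(auditResource("library/hello-world", "latest", "sha256:aabbcc")) // library/hello-world:latest
}
```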
@ -289,6 +289,7 @@ func (d *DeleteTagEvent) String() string {
|
|||
// ScanImageEvent is scanning image related event data to publish
|
||||
type ScanImageEvent struct {
|
||||
EventType string
|
||||
ScanType string
|
||||
Artifact *v1.Artifact
|
||||
OccurAt time.Time
|
||||
Operator string
|
||||
|
|
|
@ -52,7 +52,7 @@ func gcCallback(ctx context.Context, p string) error {
|
|||
return err
|
||||
}
|
||||
|
||||
func gcTaskStatusChange(ctx context.Context, taskID int64, status string) error {
|
||||
func gcTaskStatusChange(ctx context.Context, _ int64, status string) error {
|
||||
if status == job.SuccessStatus.String() && config.QuotaPerProjectEnable(ctx) {
|
||||
go func() {
|
||||
err := quota.RefreshForProjects(orm.Context())
|
||||
|
|
|
@ -41,7 +41,7 @@ type Controller interface {
|
|||
|
||||
type controller struct{}
|
||||
|
||||
func (c *controller) GetHealth(ctx context.Context) *OverallHealthStatus {
|
||||
func (c *controller) GetHealth(_ context.Context) *OverallHealthStatus {
|
||||
var isHealthy healthy = true
|
||||
components := []*ComponentHealthStatus{}
|
||||
ch := make(chan *ComponentHealthStatus, len(registry))
|
||||
|
|
|
@ -69,6 +69,10 @@ var (
|
|||
path: "./icons/wasm.png",
|
||||
resize: true,
|
||||
},
|
||||
icon.DigestOfIconAccSBOM: {
|
||||
path: "./icons/sbom.png",
|
||||
resize: true,
|
||||
},
|
||||
icon.DigestOfIconDefault: {
|
||||
path: "./icons/default.png",
|
||||
resize: true,
|
||||
|
|
|
@ -495,7 +495,7 @@ func (c *controller) ListPoliciesByProject(ctx context.Context, project int64, q
|
|||
}
|
||||
|
||||
// CheckHealth checks the instance health, for test connection
|
||||
func (c *controller) CheckHealth(ctx context.Context, instance *providerModels.Instance) error {
|
||||
func (c *controller) CheckHealth(_ context.Context, instance *providerModels.Instance) error {
|
||||
if instance == nil {
|
||||
return errors.New("instance can not be nil")
|
||||
}
|
||||
|
|
|
@ -150,9 +150,9 @@ func (c *controller) Exists(ctx context.Context, projectIDOrName interface{}) (b
|
|||
return true, nil
|
||||
} else if errors.IsNotFoundErr(err) {
|
||||
return false, nil
|
||||
} else {
|
||||
return false, err
|
||||
}
|
||||
// else
|
||||
return false, err
|
||||
}
|
||||
|
||||
func (c *controller) Get(ctx context.Context, projectIDOrName interface{}, options ...Option) (*models.Project, error) {
|
||||
|
|
|
@ -101,7 +101,7 @@ func (c *controller) EnsureTag(ctx context.Context, art lib.ArtifactInfo, tagNam
|
|||
// search the digest in cache and query with trimmed digest
|
||||
var trimmedDigest string
|
||||
err := c.cache.Fetch(ctx, TrimmedManifestlist+art.Digest, &trimmedDigest)
|
||||
if errors.Is(err, cache.ErrNotFound) {
|
||||
if errors.Is(err, cache.ErrNotFound) { // nolint:revive
|
||||
// skip to update digest, continue
|
||||
} else if err != nil {
|
||||
// for other error, return
|
||||
|
@ -183,7 +183,10 @@ func (c *controller) UseLocalManifest(ctx context.Context, art lib.ArtifactInfo,
|
|||
if c.cache == nil {
|
||||
return a != nil && string(desc.Digest) == a.Digest, nil, nil // digest matches
|
||||
}
|
||||
|
||||
// Pass digest to the cache key, digest is more stable than tag, because tag could be updated
|
||||
if len(art.Digest) == 0 {
|
||||
art.Digest = string(desc.Digest)
|
||||
}
|
||||
err = c.cache.Fetch(ctx, manifestListKey(art.Repository, art), &content)
|
||||
if err != nil {
|
||||
if errors.Is(err, cache.ErrNotFound) {
|
||||
|
@ -260,7 +263,7 @@ func (c *controller) ProxyManifest(ctx context.Context, art lib.ArtifactInfo, re
|
|||
return man, nil
|
||||
}
|
||||
|
||||
func (c *controller) HeadManifest(ctx context.Context, art lib.ArtifactInfo, remote RemoteInterface) (bool, *distribution.Descriptor, error) {
|
||||
func (c *controller) HeadManifest(_ context.Context, art lib.ArtifactInfo, remote RemoteInterface) (bool, *distribution.Descriptor, error) {
|
||||
remoteRepo := getRemoteRepo(art)
|
||||
ref := getReference(art)
|
||||
return remote.ManifestExist(remoteRepo, ref)
|
||||
|
@@ -318,8 +321,8 @@ func getRemoteRepo(art lib.ArtifactInfo) string {
}

func getReference(art lib.ArtifactInfo) string {
	if len(art.Tag) > 0 {
		return art.Tag
	if len(art.Digest) > 0 {
		return art.Digest
	}
	return art.Digest
	return art.Tag
}
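The change above makes the proxy prefer the digest over the tag when both are present. A standalone sketch of the new precedence, using a trimmed stand-in for `lib.ArtifactInfo`:

```go
package main

import "fmt"

// artifactInfo is a trimmed stand-in for the lib.ArtifactInfo fields used here.
type artifactInfo struct {
	Tag    string
	Digest string
}

// getReference prefers the immutable digest over the tag, falling back to the tag.
func getReference(art artifactInfo) string {
	if len(art.Digest) > 0 {
		return art.Digest
	}
	return art.Tag
}

func main() {
	fmt.Println(getReference(artifactInfo{Tag: "latest", Digest: "sha256:aabbcc"})) // sha256:aabbcc
	fmt.Println(getReference(artifactInfo{Tag: "latest"}))                          // latest
}
```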
@@ -209,7 +209,7 @@ func TestGetRef(t *testing.T) {
		{
			name: `normal`,
			in:   lib.ArtifactInfo{Repository: "hello-world", Tag: "latest", Digest: "sha256:aabbcc"},
			want: "latest",
			want: "sha256:aabbcc",
		},
		{
			name: `digest_only`,
@ -81,7 +81,7 @@ func newLocalHelper() localInterface {
|
|||
return l
|
||||
}
|
||||
|
||||
func (l *localHelper) BlobExist(ctx context.Context, art lib.ArtifactInfo) (bool, error) {
|
||||
func (l *localHelper) BlobExist(_ context.Context, art lib.ArtifactInfo) (bool, error) {
|
||||
return l.registry.BlobExist(art.Repository, art.Digest)
|
||||
}
|
||||
|
||||
|
|
|
@@ -63,7 +63,7 @@ type ManifestListCache struct {
}

// CacheContent ...
func (m *ManifestListCache) CacheContent(ctx context.Context, remoteRepo string, man distribution.Manifest, art lib.ArtifactInfo, r RemoteInterface, contentType string) {
func (m *ManifestListCache) CacheContent(ctx context.Context, _ string, man distribution.Manifest, art lib.ArtifactInfo, _ RemoteInterface, contentType string) {
	_, payload, err := man.Payload()
	if err != nil {
		log.Errorf("failed to get payload, error %v", err)

@@ -73,7 +73,10 @@ func (m *ManifestListCache) CacheContent(ctx context.Context, remoteRepo string,
		log.Errorf("failed to get reference, reference is empty, skip to cache manifest list")
		return
	}
	// some registries do not return the digest in the HEAD request; if no digest is returned, cache the manifest list content with the tag
	// the cache key should contain the digest if it exists
	if len(art.Digest) == 0 {
		art.Digest = string(digest.FromBytes(payload))
	}
	key := manifestListKey(art.Repository, art)
	log.Debugf("cache manifest list with key=cache:%v", key)
	if err := m.cache.Save(ctx, manifestListContentTypeKey(art.Repository, art), contentType, manifestListCacheInterval); err != nil {
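When the remote HEAD response carries no digest, the hunk above derives one from the manifest payload. A minimal sketch of that fallback using the go-digest package (the payload bytes are illustrative):

```go
package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	payload := []byte(`{"schemaVersion":2,"manifests":[]}`) // illustrative manifest-list payload
	// digest.FromBytes computes the canonical sha256 digest of the payload,
	// which can then serve as the stable part of the cache key.
	dgst := digest.FromBytes(payload)
	fmt.Println(dgst.String())
}
```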
@ -171,7 +174,7 @@ type ManifestCache struct {
|
|||
}
|
||||
|
||||
// CacheContent ...
|
||||
func (m *ManifestCache) CacheContent(ctx context.Context, remoteRepo string, man distribution.Manifest, art lib.ArtifactInfo, r RemoteInterface, contentType string) {
|
||||
func (m *ManifestCache) CacheContent(ctx context.Context, remoteRepo string, man distribution.Manifest, art lib.ArtifactInfo, r RemoteInterface, _ string) {
|
||||
var waitBlobs []distribution.Descriptor
|
||||
for n := 0; n < maxManifestWait; n++ {
|
||||
time.Sleep(sleepIntervalSec * time.Second)
|
||||
|
|
|
@ -464,7 +464,7 @@ func (c *controller) Update(ctx context.Context, u *quota.Quota) error {
|
|||
}
|
||||
|
||||
// Driver returns quota driver for the reference
|
||||
func Driver(ctx context.Context, reference string) (driver.Driver, error) {
|
||||
func Driver(_ context.Context, reference string) (driver.Driver, error) {
|
||||
d, ok := driver.Get(reference)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("quota not support for %s", reference)
|
||||
|
|
|
@ -43,7 +43,7 @@ type driver struct {
|
|||
blobCtl blob.Controller
|
||||
}
|
||||
|
||||
func (d *driver) Enabled(ctx context.Context, key string) (bool, error) {
|
||||
func (d *driver) Enabled(ctx context.Context, _ string) (bool, error) {
|
||||
// NOTE: every time load the new configurations from the db to get the latest configurations may have performance problem.
|
||||
if err := d.cfg.Load(ctx); err != nil {
|
||||
return false, err
|
||||
|
|
|
@@ -154,11 +154,8 @@ func (c *controller) Start(ctx context.Context, policy *replicationmodel.Policy,
func (c *controller) markError(ctx context.Context, executionID int64, err error) {
	logger := log.GetLogger(ctx)
	// try to stop the execution first in case that some tasks are already created
	if err := c.execMgr.StopAndWait(ctx, executionID, 10*time.Second); err != nil {
		logger.Errorf("failed to stop the execution %d: %v", executionID, err)
	}
	if err := c.execMgr.MarkError(ctx, executionID, err.Error()); err != nil {
		logger.Errorf("failed to mark error for the execution %d: %v", executionID, err)
	if e := c.execMgr.StopAndWaitWithError(ctx, executionID, 10*time.Second, err); e != nil {
		logger.Errorf("failed to stop the execution %d: %v", executionID, e)
	}
}
@@ -75,8 +75,7 @@ func (r *replicationTestSuite) TestStart() {
	// got error when running the replication flow
	r.execMgr.On("Create", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(int64(1), nil)
	r.execMgr.On("Get", mock.Anything, mock.Anything).Return(&task.Execution{}, nil)
	r.execMgr.On("StopAndWait", mock.Anything, mock.Anything, mock.Anything).Return(nil)
	r.execMgr.On("MarkError", mock.Anything, mock.Anything, mock.Anything).Return(nil)
	r.execMgr.On("StopAndWaitWithError", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil)
	r.flowCtl.On("Start", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(fmt.Errorf("error"))
	r.ormCreator.On("Create").Return(nil)
	id, err = r.ctl.Start(context.Background(), &repctlmodel.Policy{Enabled: true}, nil, task.ExecutionTriggerManual)
@ -1,4 +1,4 @@
|
|||
// Code generated by mockery v2.35.4. DO NOT EDIT.
|
||||
// Code generated by mockery v2.42.2. DO NOT EDIT.
|
||||
|
||||
package flow
|
||||
|
||||
|
@ -18,6 +18,10 @@ type mockFactory struct {
|
|||
func (_m *mockFactory) AdapterPattern() *model.AdapterPattern {
|
||||
ret := _m.Called()
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for AdapterPattern")
|
||||
}
|
||||
|
||||
var r0 *model.AdapterPattern
|
||||
if rf, ok := ret.Get(0).(func() *model.AdapterPattern); ok {
|
||||
r0 = rf()
|
||||
|
@ -34,6 +38,10 @@ func (_m *mockFactory) AdapterPattern() *model.AdapterPattern {
|
|||
func (_m *mockFactory) Create(_a0 *model.Registry) (adapter.Adapter, error) {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for Create")
|
||||
}
|
||||
|
||||
var r0 adapter.Adapter
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(0).(func(*model.Registry) (adapter.Adapter, error)); ok {
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Code generated by mockery v2.35.4. DO NOT EDIT.
|
||||
// Code generated by mockery v2.42.2. DO NOT EDIT.
|
||||
|
||||
package flow
|
||||
|
||||
|
@ -21,6 +21,10 @@ type mockAdapter struct {
|
|||
func (_m *mockAdapter) BlobExist(repository string, digest string) (bool, error) {
|
||||
ret := _m.Called(repository, digest)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for BlobExist")
|
||||
}
|
||||
|
||||
var r0 bool
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(0).(func(string, string) (bool, error)); ok {
|
||||
|
@ -45,6 +49,10 @@ func (_m *mockAdapter) BlobExist(repository string, digest string) (bool, error)
|
|||
func (_m *mockAdapter) CanBeMount(digest string) (bool, string, error) {
|
||||
ret := _m.Called(digest)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for CanBeMount")
|
||||
}
|
||||
|
||||
var r0 bool
|
||||
var r1 string
|
||||
var r2 error
|
||||
|
@ -76,6 +84,10 @@ func (_m *mockAdapter) CanBeMount(digest string) (bool, string, error) {
|
|||
func (_m *mockAdapter) DeleteManifest(repository string, reference string) error {
|
||||
ret := _m.Called(repository, reference)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for DeleteManifest")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(string, string) error); ok {
|
||||
r0 = rf(repository, reference)
|
||||
|
@ -90,6 +102,10 @@ func (_m *mockAdapter) DeleteManifest(repository string, reference string) error
|
|||
func (_m *mockAdapter) DeleteTag(repository string, tag string) error {
|
||||
ret := _m.Called(repository, tag)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for DeleteTag")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(string, string) error); ok {
|
||||
r0 = rf(repository, tag)
|
||||
|
@ -104,6 +120,10 @@ func (_m *mockAdapter) DeleteTag(repository string, tag string) error {
|
|||
func (_m *mockAdapter) FetchArtifacts(filters []*model.Filter) ([]*model.Resource, error) {
|
||||
ret := _m.Called(filters)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for FetchArtifacts")
|
||||
}
|
||||
|
||||
var r0 []*model.Resource
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(0).(func([]*model.Filter) ([]*model.Resource, error)); ok {
|
||||
|
@@ -130,6 +150,10 @@ func (_m *mockAdapter) FetchArtifacts(filters []*model.Filter) ([]*model.Resourc

func (_m *mockAdapter) HealthCheck() (string, error) {
	ret := _m.Called()

	if len(ret) == 0 {
		panic("no return value specified for HealthCheck")
	}

	var r0 string
	var r1 error
	if rf, ok := ret.Get(0).(func() (string, error)); ok {

@@ -154,6 +178,10 @@ func (_m *mockAdapter) HealthCheck() (string, error) {

func (_m *mockAdapter) Info() (*model.RegistryInfo, error) {
	ret := _m.Called()

	if len(ret) == 0 {
		panic("no return value specified for Info")
	}

	var r0 *model.RegistryInfo
	var r1 error
	if rf, ok := ret.Get(0).(func() (*model.RegistryInfo, error)); ok {

@@ -180,6 +208,10 @@ func (_m *mockAdapter) Info() (*model.RegistryInfo, error) {

func (_m *mockAdapter) ListTags(repository string) ([]string, error) {
	ret := _m.Called(repository)

	if len(ret) == 0 {
		panic("no return value specified for ListTags")
	}

	var r0 []string
	var r1 error
	if rf, ok := ret.Get(0).(func(string) ([]string, error)); ok {

@@ -206,6 +238,10 @@ func (_m *mockAdapter) ListTags(repository string) ([]string, error) {

func (_m *mockAdapter) ManifestExist(repository string, reference string) (bool, *distribution.Descriptor, error) {
	ret := _m.Called(repository, reference)

	if len(ret) == 0 {
		panic("no return value specified for ManifestExist")
	}

	var r0 bool
	var r1 *distribution.Descriptor
	var r2 error

@@ -239,6 +275,10 @@ func (_m *mockAdapter) ManifestExist(repository string, reference string) (bool,

func (_m *mockAdapter) MountBlob(srcRepository string, digest string, dstRepository string) error {
	ret := _m.Called(srcRepository, digest, dstRepository)

	if len(ret) == 0 {
		panic("no return value specified for MountBlob")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func(string, string, string) error); ok {
		r0 = rf(srcRepository, digest, dstRepository)

@@ -253,6 +293,10 @@ func (_m *mockAdapter) MountBlob(srcRepository string, digest string, dstReposit

func (_m *mockAdapter) PrepareForPush(_a0 []*model.Resource) error {
	ret := _m.Called(_a0)

	if len(ret) == 0 {
		panic("no return value specified for PrepareForPush")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func([]*model.Resource) error); ok {
		r0 = rf(_a0)
@@ -267,6 +311,10 @@ func (_m *mockAdapter) PrepareForPush(_a0 []*model.Resource) error {

func (_m *mockAdapter) PullBlob(repository string, digest string) (int64, io.ReadCloser, error) {
	ret := _m.Called(repository, digest)

	if len(ret) == 0 {
		panic("no return value specified for PullBlob")
	}

	var r0 int64
	var r1 io.ReadCloser
	var r2 error

@@ -300,6 +348,10 @@ func (_m *mockAdapter) PullBlob(repository string, digest string) (int64, io.Rea

func (_m *mockAdapter) PullBlobChunk(repository string, digest string, blobSize int64, start int64, end int64) (int64, io.ReadCloser, error) {
	ret := _m.Called(repository, digest, blobSize, start, end)

	if len(ret) == 0 {
		panic("no return value specified for PullBlobChunk")
	}

	var r0 int64
	var r1 io.ReadCloser
	var r2 error

@@ -340,6 +392,10 @@ func (_m *mockAdapter) PullManifest(repository string, reference string, acceptt
	_ca = append(_ca, _va...)
	ret := _m.Called(_ca...)

	if len(ret) == 0 {
		panic("no return value specified for PullManifest")
	}

	var r0 distribution.Manifest
	var r1 string
	var r2 error

@@ -373,6 +429,10 @@ func (_m *mockAdapter) PullManifest(repository string, reference string, acceptt

func (_m *mockAdapter) PushBlob(repository string, digest string, size int64, blob io.Reader) error {
	ret := _m.Called(repository, digest, size, blob)

	if len(ret) == 0 {
		panic("no return value specified for PushBlob")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func(string, string, int64, io.Reader) error); ok {
		r0 = rf(repository, digest, size, blob)

@@ -387,6 +447,10 @@ func (_m *mockAdapter) PushBlob(repository string, digest string, size int64, bl

func (_m *mockAdapter) PushBlobChunk(repository string, digest string, size int64, chunk io.Reader, start int64, end int64, location string) (string, int64, error) {
	ret := _m.Called(repository, digest, size, chunk, start, end, location)

	if len(ret) == 0 {
		panic("no return value specified for PushBlobChunk")
	}

	var r0 string
	var r1 int64
	var r2 error

@@ -418,6 +482,10 @@ func (_m *mockAdapter) PushBlobChunk(repository string, digest string, size int6

func (_m *mockAdapter) PushManifest(repository string, reference string, mediaType string, payload []byte) (string, error) {
	ret := _m.Called(repository, reference, mediaType, payload)

	if len(ret) == 0 {
		panic("no return value specified for PushManifest")
	}

	var r0 string
	var r1 error
	if rf, ok := ret.Get(0).(func(string, string, string, []byte) (string, error)); ok {
@@ -1,4 +1,4 @@
// Code generated by mockery v2.35.4. DO NOT EDIT.
// Code generated by mockery v2.42.2. DO NOT EDIT.

package replication

@@ -21,6 +21,10 @@ type flowController struct {

func (_m *flowController) Start(ctx context.Context, executionID int64, policy *model.Policy, resource *regmodel.Resource) error {
	ret := _m.Called(ctx, executionID, policy, resource)

	if len(ret) == 0 {
		panic("no return value specified for Start")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, int64, *model.Policy, *regmodel.Resource) error); ok {
		r0 = rf(ctx, executionID, policy, resource)
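The mock hunks above come from regenerating Harbor's replication mocks with a newer mockery (the header comment moves from v2.35.4 to v2.42.2). The behavioral difference is the guard added right after _m.Called(...): if a test never registered a return value with On(...), the mock now panics with a descriptive message instead of silently handing back zero values. Below is a minimal hand-written sketch of the same pattern; healthChecker and mockHealthChecker are hypothetical names used only for illustration, not Harbor types.

package example

import "github.com/stretchr/testify/mock"

// healthChecker is a hypothetical interface used only for this illustration.
type healthChecker interface {
	HealthCheck() (string, error)
}

// mockHealthChecker mirrors the shape of mockery v2.42.x output.
type mockHealthChecker struct {
	mock.Mock
}

var _ healthChecker = (*mockHealthChecker)(nil)

func (m *mockHealthChecker) HealthCheck() (string, error) {
	ret := m.Called()

	// New in the regenerated mocks: fail loudly when no expectation was set.
	if len(ret) == 0 {
		panic("no return value specified for HealthCheck")
	}

	var r0 string
	var r1 error
	if rf, ok := ret.Get(0).(func() (string, error)); ok {
		// The whole return tuple can be stubbed with a single function.
		r0, r1 = rf()
	} else {
		r0 = ret.Get(0).(string)
		r1 = ret.Error(1)
	}
	return r0, r1
}

In a test, m.On("HealthCheck").Return("healthy", nil) keeps the old behavior; a call on an unstubbed method now fails fast with a panic that names the missing expectation.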
@@ -326,7 +326,7 @@ func (t *transfer) copyChunkWithRetry(srcRepo, dstRepo, digest string, sizeFromD
}

// tryMountBlob try to check existence and mount, return true if mounted.
func (t *transfer) tryMountBlob(srcRepo, dstRepo, digest string) (bool, error) {
func (t *transfer) tryMountBlob(_, dstRepo, digest string) (bool, error) {
	if t.shouldStop() {
		return false, errStopped
	}
@@ -280,12 +280,8 @@ func (r *defaultController) TriggerRetentionExec(ctx context.Context, policyID i
	if num, err := r.launcher.Launch(ctx, p, id, dryRun); err != nil {
		logger.Errorf("failed to launch the retention jobs, err: %v", err)

		if err = r.execMgr.StopAndWait(ctx, id, 10*time.Second); err != nil {
			logger.Errorf("failed to stop the retention execution %d: %v", id, err)
		}

		if err = r.execMgr.MarkError(ctx, id, err.Error()); err != nil {
			logger.Errorf("failed to mark error for the retention execution %d: %v", id, err)
		if e := r.execMgr.StopAndWaitWithError(ctx, id, 10*time.Second, err); e != nil {
			logger.Errorf("failed to stop the retention execution %d: %v", id, e)
		}
	} else if num == 0 {
		// no candidates, mark the execution as done directly
@@ -102,10 +102,6 @@ func (d *controller) Create(ctx context.Context, r *Robot) (int64, string, error
	var expiresAt int64
	if r.Duration == -1 {
		expiresAt = -1
	} else if r.Duration == 0 {
		// system default robot duration
		r.Duration = int64(config.RobotTokenDuration(ctx))
		expiresAt = time.Now().AddDate(0, 0, config.RobotTokenDuration(ctx)).Unix()
	} else {
		durationStr := strconv.FormatInt(r.Duration, 10)
		duration, err := strconv.Atoi(durationStr)
@@ -17,6 +17,7 @@ package scan
import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"reflect"
	"strings"

@@ -25,7 +26,6 @@ import (

	"github.com/google/uuid"

	"github.com/goharbor/harbor/src/common/rbac"
	ar "github.com/goharbor/harbor/src/controller/artifact"
	"github.com/goharbor/harbor/src/controller/event/operator"
	"github.com/goharbor/harbor/src/controller/robot"

@@ -49,6 +49,7 @@ import (
	"github.com/goharbor/harbor/src/pkg/scan/postprocessors"
	"github.com/goharbor/harbor/src/pkg/scan/report"
	v1 "github.com/goharbor/harbor/src/pkg/scan/rest/v1"
	sbomModel "github.com/goharbor/harbor/src/pkg/scan/sbom/model"
	"github.com/goharbor/harbor/src/pkg/scan/vuln"
	"github.com/goharbor/harbor/src/pkg/task"
)

@@ -68,10 +69,11 @@ const (
	artfiactKey     = "artifact"
	registrationKey = "registration"

	artifactIDKey  = "artifact_id"
	artifactTagKey = "artifact_tag"
	reportUUIDsKey = "report_uuids"
	robotIDKey     = "robot_id"
	artifactIDKey       = "artifact_id"
	artifactTagKey      = "artifact_tag"
	reportUUIDsKey      = "report_uuids"
	robotIDKey          = "robot_id"
	enabledCapabilities = "enabled_capabilities"
)

// uuidGenerator is a func template which is for generating UUID.

@@ -91,6 +93,7 @@ type launchScanJobParam struct {
	Artifact *ar.Artifact
	Tag      string
	Reports  []*scan.Report
	Type     string
}

// basicController is default implementation of api.Controller interface
@@ -193,6 +196,18 @@ func (bc *basicController) collectScanningArtifacts(ctx context.Context, r *scan
			return nil
		}

		// Many in-toto SBOM artifacts on Docker Hub are replicated into Harbor and classified as image type.
		// When such an artifact is scanned, the scanner may treat it as an image layer in tgz format; reading
		// the layer as a tgz stream then fails and closes the stream abruptly, which panics in the Harbor core log.
		// To avoid the panic, skip scanning in-toto SBOM artifacts.
		unscannable, err := bc.ar.HasUnscannableLayer(ctx, a.Digest)
		if err != nil {
			return err
		}
		if unscannable {
			return nil
		}

		supported := hasCapability(r, a)

		if !supported && a.IsImageIndex() {
@@ -242,23 +257,26 @@ func (bc *basicController) Scan(ctx context.Context, artifact *ar.Artifact, opti
	if err != nil {
		return err
	}

	if !scannable {
		return errors.BadRequestError(nil).WithMessage("the configured scanner %s does not support scanning artifact with mime type %s", r.Name, artifact.ManifestMediaType)
	}

	// Parse options
	opts, err := parseOptions(options...)
	if err != nil {
		return errors.Wrap(err, "scan controller: scan")
	}

	if !scannable {
		if opts.FromEvent {
			// skip to return err for event related scan
			return nil
		}
		return errors.BadRequestError(nil).WithMessage("the configured scanner %s does not support scanning artifact with mime type %s", r.Name, artifact.ManifestMediaType)
	}

	var (
		errs                []error
		launchScanJobParams []*launchScanJobParam
	)
	for _, art := range artifacts {
		reports, err := bc.makeReportPlaceholder(ctx, r, art)
		reports, err := bc.makeReportPlaceholder(ctx, r, art, opts)
		if err != nil {
			if errors.IsConflictErr(err) {
				errs = append(errs, err)

@@ -287,6 +305,7 @@ func (bc *basicController) Scan(ctx context.Context, artifact *ar.Artifact, opti
				Artifact: art,
				Tag:      tag,
				Reports:  reports,
				Type:     opts.GetScanType(),
			})
		}
	}

@@ -308,6 +327,9 @@ func (bc *basicController) Scan(ctx context.Context, artifact *ar.Artifact, opti
			"id":   r.ID,
			"name": r.Name,
		},
		enabledCapabilities: map[string]interface{}{
			"type": opts.GetScanType(),
		},
	}
	if op := operator.FromContext(ctx); op != "" {
		extraAttrs["operator"] = op

@@ -324,7 +346,7 @@ func (bc *basicController) Scan(ctx context.Context, artifact *ar.Artifact, opti
	for _, launchScanJobParam := range launchScanJobParams {
		launchScanJobParam.ExecutionID = opts.ExecutionID

		if err := bc.launchScanJob(ctx, launchScanJobParam); err != nil {
		if err := bc.launchScanJob(ctx, launchScanJobParam, opts); err != nil {
			log.G(ctx).Warningf("scan artifact %s@%s failed, error: %v", artifact.RepositoryName, artifact.Digest, err)
			errs = append(errs, err)
		}
@@ -339,15 +361,16 @@ func (bc *basicController) Scan(ctx context.Context, artifact *ar.Artifact, opti
}

// Stop scan job of a given artifact
func (bc *basicController) Stop(ctx context.Context, artifact *ar.Artifact) error {
func (bc *basicController) Stop(ctx context.Context, artifact *ar.Artifact, capType string) error {
	if artifact == nil {
		return errors.New("nil artifact to stop scan")
	}
	query := q.New(q.KeyWords{"extra_attrs.artifact.digest": artifact.Digest})
	query := q.New(q.KeyWords{"vendor_type": job.ImageScanJobVendorType, "extra_attrs.artifact.digest": artifact.Digest, "extra_attrs.enabled_capabilities.type": capType})
	executions, err := bc.execMgr.List(ctx, query)
	if err != nil {
		return err
	}

	if len(executions) == 0 {
		message := fmt.Sprintf("no scan job for artifact digest=%v", artifact.Digest)
		return errors.BadRequestError(nil).WithMessage(message)
@@ -379,7 +402,9 @@ func (bc *basicController) ScanAll(ctx context.Context, trigger string, async bo
		}

		err = bc.startScanAll(ctx, executionID)
		log.Errorf("failed to start scan all, executionID=%d, error: %v", executionID, err)
		if err != nil {
			log.Errorf("failed to start scan all, executionID=%d, error: %v", executionID, err)
		}
	}(bc.makeCtx())
} else {
	if err := bc.startScanAll(ctx, executionID); err != nil {

@@ -541,13 +566,15 @@ func (bc *basicController) startScanAll(ctx context.Context, executionID int64)
	return nil
}

func (bc *basicController) makeReportPlaceholder(ctx context.Context, r *scanner.Registration, art *ar.Artifact) ([]*scan.Report, error) {
	mimeTypes := r.GetProducesMimeTypes(art.ManifestMediaType)

func (bc *basicController) makeReportPlaceholder(ctx context.Context, r *scanner.Registration, art *ar.Artifact, opts *Options) ([]*scan.Report, error) {
	mimeTypes := r.GetProducesMimeTypes(art.ManifestMediaType, opts.GetScanType())
	oldReports, err := bc.manager.GetBy(bc.cloneCtx(ctx), art.Digest, r.UUID, mimeTypes)
	if err != nil {
		return nil, err
	}
	if err := bc.deleteArtifactAccessories(ctx, oldReports); err != nil {
		return nil, err
	}

	if err := bc.assembleReports(ctx, oldReports...); err != nil {
		return nil, err

@@ -569,7 +596,7 @@ func (bc *basicController) makeReportPlaceholder(ctx context.Context, r *scanner

	var reports []*scan.Report

	for _, pm := range r.GetProducesMimeTypes(art.ManifestMediaType) {
	for _, pm := range r.GetProducesMimeTypes(art.ManifestMediaType, opts.GetScanType()) {
		report := &scan.Report{
			Digest:           art.Digest,
			RegistrationUUID: r.UUID,
@@ -670,12 +697,23 @@ func (bc *basicController) GetReport(ctx context.Context, artifact *ar.Artifact,
	return reports, nil
}

func isSBOMMimeTypes(mimeTypes []string) bool {
	for _, mimeType := range mimeTypes {
		if mimeType == v1.MimeTypeSBOMReport {
			return true
		}
	}
	return false
}

// GetSummary ...
func (bc *basicController) GetSummary(ctx context.Context, artifact *ar.Artifact, mimeTypes []string) (map[string]interface{}, error) {
	if artifact == nil {
		return nil, errors.New("no way to get report summaries for nil artifact")
	}

	if isSBOMMimeTypes(mimeTypes) {
		return bc.GetSBOMSummary(ctx, artifact, mimeTypes)
	}
	// Get reports first
	rps, err := bc.GetReport(ctx, artifact, mimeTypes)
	if err != nil {
@@ -704,6 +742,52 @@ func (bc *basicController) GetSummary(ctx context.Context, artifact *ar.Artifact
	return summaries, nil
}

func (bc *basicController) GetSBOMSummary(ctx context.Context, art *ar.Artifact, mimeTypes []string) (map[string]interface{}, error) {
	if art == nil {
		return nil, errors.New("no way to get report summaries for nil artifact")
	}
	r, err := bc.sc.GetRegistrationByProject(ctx, art.ProjectID)
	if err != nil {
		return nil, errors.Wrap(err, "scan controller: get sbom summary")
	}
	reports, err := bc.manager.GetBy(ctx, art.Digest, r.UUID, mimeTypes)
	if err != nil {
		return nil, err
	}
	if len(reports) == 0 {
		return map[string]interface{}{}, nil
	}
	reportContent := reports[0].Report
	result := map[string]interface{}{}
	if len(reportContent) == 0 {
		status := bc.retrieveStatusFromTask(ctx, reports[0].UUID)
		if len(status) > 0 {
			result[sbomModel.ReportID] = reports[0].UUID
			result[sbomModel.ScanStatus] = status
		}
		log.Debug("no content for current report")
		return result, nil
	}
	err = json.Unmarshal([]byte(reportContent), &result)
	return result, err
}

// retrieve the status from task
func (bc *basicController) retrieveStatusFromTask(ctx context.Context, reportID string) string {
	if len(reportID) == 0 {
		return ""
	}
	tasks, err := bc.taskMgr.ListScanTasksByReportUUID(ctx, reportID)
	if err != nil {
		log.Warningf("can not find the task with report UUID %v, error %v", reportID, err)
		return ""
	}
	if len(tasks) > 0 {
		return tasks[0].Status
	}
	return ""
}

// GetScanLog ...
func (bc *basicController) GetScanLog(ctx context.Context, artifact *ar.Artifact, uuid string) ([]byte, error) {
	if len(uuid) == 0 {
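GetSBOMSummary above relies on the report row already storing the summary as JSON, so producing the API response is essentially one json.Unmarshal into a generic map. A standalone sketch using the same fixture content that the test suite below registers for report rp-uuid-002 (the fixture string is taken from this change set; everything else is illustrative):

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

func main() {
	// Fixture mirroring the stored SBOM report content used in the tests below.
	reportContent := `{"sbom_digest": "sha256:1234567890", "scan_status": "Success", "duration": 3, "start_time": "2021-09-01T00:00:00Z", "end_time": "2021-09-01T00:00:03Z"}`

	// The controller returns the decoded map directly as the summary.
	result := map[string]interface{}{}
	if err := json.Unmarshal([]byte(reportContent), &result); err != nil {
		log.Fatalf("unmarshal sbom report: %v", err)
	}

	fmt.Println(result["scan_status"]) // Success
	fmt.Println(result["sbom_digest"]) // sha256:1234567890
}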
@@ -910,7 +994,7 @@ func (bc *basicController) GetVulnerable(ctx context.Context, artifact *ar.Artif
}

// makeRobotAccount creates a robot account based on the arguments for scanning.
func (bc *basicController) makeRobotAccount(ctx context.Context, projectID int64, repository string, registration *scanner.Registration) (*robot.Robot, error) {
func (bc *basicController) makeRobotAccount(ctx context.Context, projectID int64, repository string, registration *scanner.Registration, permission []*types.Policy) (*robot.Robot, error) {
	// Use uuid as name to avoid duplicated entries.
	UUID, err := bc.uuid()
	if err != nil {

@@ -925,22 +1009,14 @@ func (bc *basicController) makeRobotAccount(ctx context.Context, projectID int64
			Name:        fmt.Sprintf("%s-%s-%s", scannerPrefix, registration.Name, UUID),
			Description: "for scan",
			ProjectID:   projectID,
			Duration:    -1,
		},
		Level: robot.LEVELPROJECT,
		Permissions: []*robot.Permission{
			{
				Kind:      "project",
				Namespace: projectName,
				Access: []*types.Policy{
					{
						Resource: rbac.ResourceRepository,
						Action:   rbac.ActionPull,
					},
					{
						Resource: rbac.ResourceRepository,
						Action:   rbac.ActionScannerPull,
					},
				},
				Access: permission,
			},
		},
	}

@@ -959,7 +1035,7 @@ func (bc *basicController) makeRobotAccount(ctx context.Context, projectID int64
}

// launchScanJob launches a job to run scan
func (bc *basicController) launchScanJob(ctx context.Context, param *launchScanJobParam) error {
func (bc *basicController) launchScanJob(ctx context.Context, param *launchScanJobParam, opts *Options) error {
	// don't launch scan job for the artifact which is not supported by the scanner
	if !hasCapability(param.Registration, param.Artifact) {
		return nil
@@ -977,7 +1053,12 @@ func (bc *basicController) launchScanJob(ctx context.Context, param *launchScanJ
		return errors.Wrap(err, "scan controller: launch scan job")
	}

	robot, err := bc.makeRobotAccount(ctx, param.Artifact.ProjectID, param.Artifact.RepositoryName, param.Registration)
	// Get Scanner handler by scan type to separate the scan logic for different scan types
	handler := sca.GetScanHandler(param.Type)
	if handler == nil {
		return fmt.Errorf("failed to get scan handler, type is %v", param.Type)
	}
	robot, err := bc.makeRobotAccount(ctx, param.Artifact.ProjectID, param.Artifact.RepositoryName, param.Registration, handler.RequiredPermissions())
	if err != nil {
		return errors.Wrap(err, "scan controller: launch scan job")
	}

@@ -993,6 +1074,12 @@ func (bc *basicController) launchScanJob(ctx context.Context, param *launchScanJ
			Digest:   param.Artifact.Digest,
			Tag:      param.Tag,
			MimeType: param.Artifact.ManifestMediaType,
			Size:     param.Artifact.Size,
		},
		RequestType: []*v1.ScanType{
			{
				Type: opts.GetScanType(),
			},
		},
	}

@@ -1227,3 +1314,48 @@ func parseOptions(options ...Option) (*Options, error) {

	return ops, nil
}

// deleteArtifactAccessories delete the accessory in reports, only delete sbom accessory
func (bc *basicController) deleteArtifactAccessories(ctx context.Context, reports []*scan.Report) error {
	for _, rpt := range reports {
		if rpt.MimeType != v1.MimeTypeSBOMReport {
			continue
		}
		if err := bc.deleteArtifactAccessory(ctx, rpt.Report); err != nil {
			return err
		}
	}
	return nil
}

// deleteArtifactAccessory check if current report has accessory info, if there is, delete it
func (bc *basicController) deleteArtifactAccessory(ctx context.Context, report string) error {
	if len(report) == 0 {
		return nil
	}
	sbomSummary := sbomModel.Summary{}
	if err := json.Unmarshal([]byte(report), &sbomSummary); err != nil {
		// it could be a non sbom report, just skip
		log.Debugf("fail to unmarshal %v, skip to delete sbom report", err)
		return nil
	}
	repo, dgst := sbomSummary.SBOMAccArt()
	if len(repo) == 0 || len(dgst) == 0 {
		return nil
	}
	art, err := bc.ar.GetByReference(ctx, repo, dgst, nil)
	if err != nil {
		if errors.IsNotFoundErr(err) {
			return nil
		}
		return err
	}
	if art == nil {
		return nil
	}
	err = bc.ar.Delete(ctx, art.ID)
	if errors.IsNotFoundErr(err) {
		return nil
	}
	return err
}
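launchScanJob above now resolves a per-scan-type handler via sca.GetScanHandler(param.Type) and uses handler.RequiredPermissions() to scope the scanner robot account. The sketch below shows the general registry-and-dispatch pattern with hypothetical names and a deliberately trimmed-down Handler interface; it is not Harbor's actual handler API beyond the two calls visible in this diff.

package example

import (
	"fmt"
	"sync"
)

// Handler is a hypothetical stand-in for a per-scan-type handler; the real
// Harbor interface has more methods, RequiredPermissions is the one visible
// in this change set (and it returns richer policy objects there).
type Handler interface {
	RequiredPermissions() []string
}

var (
	mu       sync.RWMutex
	registry = map[string]Handler{}
)

// RegisterScanHandler associates a handler with a scan type, e.g. "vulnerability" or "sbom".
func RegisterScanHandler(scanType string, h Handler) {
	mu.Lock()
	defer mu.Unlock()
	registry[scanType] = h
}

// GetScanHandler returns the handler for the scan type, or nil if none is
// registered, mirroring the nil check performed in launchScanJob above.
func GetScanHandler(scanType string) Handler {
	mu.RLock()
	defer mu.RUnlock()
	return registry[scanType]
}

func dispatch(scanType string) error {
	h := GetScanHandler(scanType)
	if h == nil {
		return fmt.Errorf("failed to get scan handler, type is %v", scanType)
	}
	_ = h.RequiredPermissions() // e.g. used to scope the scanner robot account
	return nil
}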
@@ -45,6 +45,7 @@ import (
	"github.com/goharbor/harbor/src/pkg/scan/dao/scanner"
	v1 "github.com/goharbor/harbor/src/pkg/scan/rest/v1"
	"github.com/goharbor/harbor/src/pkg/scan/vuln"
	_ "github.com/goharbor/harbor/src/pkg/scan/vulnerability"
	"github.com/goharbor/harbor/src/pkg/task"
	artifacttesting "github.com/goharbor/harbor/src/testing/controller/artifact"
	robottesting "github.com/goharbor/harbor/src/testing/controller/robot"

@@ -69,15 +70,16 @@ type ControllerTestSuite struct {

	tagCtl *tagtesting.FakeController

	registration *scanner.Registration
	artifact     *artifact.Artifact
	rawReport    string
	registration  *scanner.Registration
	artifact      *artifact.Artifact
	wrongArtifact *artifact.Artifact
	rawReport     string

	execMgr         *tasktesting.ExecutionManager
	taskMgr         *tasktesting.Manager
	reportMgr       *reporttesting.Manager
	ar              artifact.Controller
	c               Controller
	c               *basicController
	reportConverter *postprocessorstesting.ScanReportV1ToV2Converter
	cache           *mockcache.Cache
}
@@ -100,6 +102,9 @@ func (suite *ControllerTestSuite) SetupSuite() {
	suite.artifact.Digest = "digest-code"
	suite.artifact.ManifestMediaType = v1.MimeTypeDockerArtifact

	suite.wrongArtifact = &artifact.Artifact{Artifact: art.Artifact{ID: 2, ProjectID: 1}}
	suite.wrongArtifact.Digest = "digest-wrong"

	m := &v1.ScannerAdapterMetadata{
		Scanner: &v1.Scanner{
			Name: "Trivy",

@@ -107,6 +112,7 @@ func (suite *ControllerTestSuite) SetupSuite() {
			Version: "0.1.0",
		},
		Capabilities: []*v1.ScannerCapability{{
			Type: v1.ScanTypeVulnerability,
			ConsumesMimeTypes: []string{
				v1.MimeTypeOCIArtifact,
				v1.MimeTypeDockerArtifact,

@@ -114,7 +120,17 @@ func (suite *ControllerTestSuite) SetupSuite() {
			ProducesMimeTypes: []string{
				v1.MimeTypeNativeReport,
			},
		}},
		},
			{
				Type: v1.ScanTypeSbom,
				ConsumesMimeTypes: []string{
					v1.MimeTypeOCIArtifact,
				},
				ProducesMimeTypes: []string{
					v1.MimeTypeSBOMReport,
				},
			},
		},
		Properties: v1.ScannerProperties{
			"extra": "testing",
		},
@@ -179,7 +195,22 @@ func (suite *ControllerTestSuite) SetupSuite() {
		},
	}

	sbomReport := []*scan.Report{
		{
			ID:               12,
			UUID:             "rp-uuid-002",
			Digest:           "digest-code",
			RegistrationUUID: "uuid001",
			MimeType:         "application/vnd.scanner.adapter.sbom.report.harbor+json; version=1.0",
			Status:           "Success",
			Report:           `{"sbom_digest": "sha256:1234567890", "scan_status": "Success", "duration": 3, "start_time": "2021-09-01T00:00:00Z", "end_time": "2021-09-01T00:00:03Z"}`,
		},
	}

	emptySBOMReport := []*scan.Report{{Report: ``, UUID: "rp-uuid-004"}}
	mgr.On("GetBy", mock.Anything, suite.artifact.Digest, suite.registration.UUID, []string{v1.MimeTypeNativeReport}).Return(reports, nil)
	mgr.On("GetBy", mock.Anything, suite.artifact.Digest, suite.registration.UUID, []string{v1.MimeTypeSBOMReport}).Return(sbomReport, nil)
	mgr.On("GetBy", mock.Anything, suite.wrongArtifact.Digest, suite.registration.UUID, []string{v1.MimeTypeSBOMReport}).Return(emptySBOMReport, nil)
	mgr.On("Get", mock.Anything, "rp-uuid-001").Return(reports[0], nil)
	mgr.On("UpdateReportData", "rp-uuid-001", suite.rawReport, (int64)(10000)).Return(nil)
	mgr.On("UpdateStatus", "the-uuid-123", "Success", (int64)(10000)).Return(nil)

@@ -199,6 +230,7 @@ func (suite *ControllerTestSuite) SetupSuite() {
			Name:        rname,
			Description: "for scan",
			ProjectID:   suite.artifact.ProjectID,
			Duration:    -1,
		},
		Level:       robot.LEVELPROJECT,
		Permissions: []*robot.Permission{

@@ -229,6 +261,7 @@ func (suite *ControllerTestSuite) SetupSuite() {
			Secret:      "robot-account",
			Description: "for scan",
			ProjectID:   suite.artifact.ProjectID,
			Duration:    -1,
		},
		Level: "project",
	}, nil)
@@ -317,6 +350,7 @@ func (suite *ControllerTestSuite) TestScanControllerScan() {
	{
		// artifact not provided
		suite.Require().Error(suite.c.Scan(context.TODO(), nil))
		mock.OnAnything(suite.ar, "HasUnscannableLayer").Return(false, nil).Times(3)
	}

	{

@@ -336,7 +370,7 @@ func (suite *ControllerTestSuite) TestScanControllerScan() {
		mock.OnAnything(suite.execMgr, "Create").Return(int64(1), nil).Once()
		mock.OnAnything(suite.taskMgr, "Create").Return(int64(1), nil).Once()

		ctx := orm.NewContext(nil, &ormtesting.FakeOrmer{})
		ctx := orm.NewContext(context.TODO(), &ormtesting.FakeOrmer{})

		suite.Require().NoError(suite.c.Scan(ctx, suite.artifact))
	}

@@ -378,7 +412,7 @@ func (suite *ControllerTestSuite) TestScanControllerScan() {
func (suite *ControllerTestSuite) TestScanControllerStop() {
	{
		// artifact not provided
		suite.Require().Error(suite.c.Stop(context.TODO(), nil))
		suite.Require().Error(suite.c.Stop(context.TODO(), nil, "vulnerability"))
	}

	{

@@ -390,7 +424,7 @@ func (suite *ControllerTestSuite) TestScanControllerStop() {

		ctx := orm.NewContext(nil, &ormtesting.FakeOrmer{})

		suite.Require().NoError(suite.c.Stop(ctx, suite.artifact))
		suite.Require().NoError(suite.c.Stop(ctx, suite.artifact, "vulnerability"))
	}

	{

@@ -400,7 +434,7 @@ func (suite *ControllerTestSuite) TestScanControllerStop() {

		ctx := orm.NewContext(nil, &ormtesting.FakeOrmer{})

		suite.Require().Error(suite.c.Stop(ctx, suite.artifact))
		suite.Require().Error(suite.c.Stop(ctx, suite.artifact, "vulnerability"))
	}

	{

@@ -409,12 +443,13 @@ func (suite *ControllerTestSuite) TestScanControllerStop() {

		ctx := orm.NewContext(nil, &ormtesting.FakeOrmer{})

		suite.Require().Error(suite.c.Stop(ctx, suite.artifact))
		suite.Require().Error(suite.c.Stop(ctx, suite.artifact, "vulnerability"))
	}
}

// TestScanControllerGetReport ...
func (suite *ControllerTestSuite) TestScanControllerGetReport() {
	mock.OnAnything(suite.ar, "HasUnscannableLayer").Return(false, nil).Once()
	ctx := orm.NewContext(nil, &ormtesting.FakeOrmer{})
	mock.OnAnything(suite.ar, "Walk").Return(nil).Run(func(args mock.Arguments) {
		walkFn := args.Get(2).(func(*artifact.Artifact) error)
@@ -433,13 +468,13 @@ func (suite *ControllerTestSuite) TestScanControllerGetReport() {
// TestScanControllerGetSummary ...
func (suite *ControllerTestSuite) TestScanControllerGetSummary() {
	ctx := orm.NewContext(nil, &ormtesting.FakeOrmer{})
	mock.OnAnything(suite.ar, "HasUnscannableLayer").Return(false, nil).Once()
	mock.OnAnything(suite.accessoryMgr, "List").Return([]accessoryModel.Accessory{}, nil).Once()
	mock.OnAnything(suite.ar, "Walk").Return(nil).Run(func(args mock.Arguments) {
		walkFn := args.Get(2).(func(*artifact.Artifact) error)
		walkFn(suite.artifact)
	}).Once()
	mock.OnAnything(suite.taskMgr, "ListScanTasksByReportUUID").Return(nil, nil).Once()

	sum, err := suite.c.GetSummary(ctx, suite.artifact, []string{v1.MimeTypeNativeReport})
	require.NoError(suite.T(), err)
	assert.Equal(suite.T(), 1, len(sum))

@@ -447,6 +482,7 @@ func (suite *ControllerTestSuite) TestScanControllerGetSummary() {

// TestScanControllerGetScanLog ...
func (suite *ControllerTestSuite) TestScanControllerGetScanLog() {
	mock.OnAnything(suite.ar, "HasUnscannableLayer").Return(false, nil).Once()
	ctx := orm.NewContext(nil, &ormtesting.FakeOrmer{})
	mock.OnAnything(suite.taskMgr, "ListScanTasksByReportUUID").Return([]*task.Task{
		{

@@ -467,6 +503,7 @@ func (suite *ControllerTestSuite) TestScanControllerGetScanLog() {

func (suite *ControllerTestSuite) TestScanControllerGetMultiScanLog() {
	ctx := orm.NewContext(nil, &ormtesting.FakeOrmer{})
	mock.OnAnything(suite.ar, "HasUnscannableLayer").Return(false, nil).Times(4)
	suite.taskMgr.On("ListScanTasksByReportUUID", ctx, "rp-uuid-001").Return([]*task.Task{
		{
			ID: 1,

@@ -529,7 +566,7 @@ func (suite *ControllerTestSuite) TestScanAll() {
	{
		// no artifacts found when scan all
		executionID := int64(1)

		mock.OnAnything(suite.ar, "HasUnscannableLayer").Return(false, nil).Once()
		suite.execMgr.On(
			"Create", mock.Anything, "SCAN_ALL", int64(0), "SCHEDULE",
			mock.Anything).Return(executionID, nil).Once()
@@ -617,3 +654,57 @@ func (suite *ControllerTestSuite) makeExtraAttrs(artifactID int64, reportUUIDs .

	return extraAttrs
}

func (suite *ControllerTestSuite) TestGenerateSBOMSummary() {
	sum, err := suite.c.GetSBOMSummary(context.TODO(), suite.artifact, []string{v1.MimeTypeSBOMReport})
	suite.Nil(err)
	suite.NotNil(sum)
	status := sum["scan_status"]
	suite.NotNil(status)
	dgst := sum["sbom_digest"]
	suite.NotNil(dgst)
	suite.Equal("Success", status)
	suite.Equal("sha256:1234567890", dgst)
	tasks := []*task.Task{{Status: "Error"}}
	suite.taskMgr.On("ListScanTasksByReportUUID", mock.Anything, "rp-uuid-004").Return(tasks, nil).Once()
	sum2, err := suite.c.GetSummary(context.TODO(), suite.wrongArtifact, []string{v1.MimeTypeSBOMReport})
	suite.Nil(err)
	suite.NotNil(sum2)
}

func TestIsSBOMMimeTypes(t *testing.T) {
	// Test with a slice containing the SBOM mime type
	assert.True(t, isSBOMMimeTypes([]string{v1.MimeTypeSBOMReport}))

	// Test with a slice not containing the SBOM mime type
	assert.False(t, isSBOMMimeTypes([]string{"application/vnd.oci.image.manifest.v1+json"}))

	// Test with an empty slice
	assert.False(t, isSBOMMimeTypes([]string{}))
}

func (suite *ControllerTestSuite) TestDeleteArtifactAccessories() {
	// artifact not provided
	suite.Nil(suite.c.deleteArtifactAccessories(context.TODO(), nil))

	// artifact is provided
	art := &artifact.Artifact{Artifact: art.Artifact{ID: 1, ProjectID: 1, RepositoryName: "library/photon"}}
	mock.OnAnything(suite.ar, "GetByReference").Return(art, nil).Once()
	mock.OnAnything(suite.ar, "Delete").Return(nil).Once()
	reportContent := `{"sbom_digest":"sha256:12345", "scan_status":"Success", "duration":3, "sbom_repository":"library/photon"}`
	emptyReportContent := ``
	reports := []*scan.Report{
		{Report: reportContent},
		{Report: emptyReportContent},
	}
	ctx := orm.NewContext(nil, &ormtesting.FakeOrmer{})
	suite.NoError(suite.c.deleteArtifactAccessories(ctx, reports))
}

func (suite *ControllerTestSuite) TestRetrieveStatusFromTask() {
	tasks := []*task.Task{{Status: "Error"}}
	suite.taskMgr.On("ListScanTasksByReportUUID", mock.Anything, "rp-uuid-004").Return(tasks, nil).Once()
	status := suite.c.retrieveStatusFromTask(nil, "rp-uuid-004")
	suite.Equal("Error", status)
}
@@ -120,6 +120,13 @@ func scanTaskStatusChange(ctx context.Context, taskID int64, status string) (err
		if operator, ok := exec.ExtraAttrs["operator"].(string); ok {
			e.Operator = operator
		}

		// extract ScanType if it exists in ExtraAttrs
		if c, ok := exec.ExtraAttrs["enabled_capabilities"].(map[string]interface{}); ok {
			if Type, ok := c["type"].(string); ok {
				e.ScanType = Type
			}
		}
		// fire event
		notification.AddEvent(ctx, e)
	}
@@ -86,6 +86,18 @@ func (c *checker) IsScannable(ctx context.Context, art *artifact.Artifact) (bool
			return artifact.ErrBreak
		}

		// Many in-toto SBOM artifacts on Docker Hub are replicated into Harbor and classified as image type.
		// When such an artifact is scanned, the scanner may treat it as an image layer in tgz format; reading
		// the layer as a tgz stream then fails and closes the stream abruptly, which panics in the Harbor core log.
		// To avoid the panic, skip scanning in-toto SBOM artifacts.
		unscannable, err := c.artifactCtl.HasUnscannableLayer(ctx, a.Digest)
		if err != nil {
			return err
		}
		if unscannable {
			return nil
		}

		return nil
	}
@@ -81,7 +81,7 @@ func (suite *CheckerTestSuite) TestIsScannable() {
		walkFn := args.Get(2).(func(*artifact.Artifact) error)
		walkFn(art)
	})

	mock.OnAnything(c.artifactCtl, "HasUnscannableLayer").Return(false, nil).Once()
	isScannable, err := c.IsScannable(context.TODO(), art)
	suite.Nil(err)
	suite.False(isScannable)

@@ -97,6 +97,7 @@ func (suite *CheckerTestSuite) TestIsScannable() {
		walkFn := args.Get(2).(func(*artifact.Artifact) error)
		walkFn(art)
	})
	mock.OnAnything(c.artifactCtl, "HasUnscannableLayer").Return(false, nil).Once()

	isScannable, err := c.IsScannable(context.TODO(), art)
	suite.Nil(err)
@@ -55,10 +55,11 @@ type Controller interface {
	// Arguments:
	//   ctx context.Context : the context for this method
	//   artifact *artifact.Artifact : the artifact whose scan job to be stopped
	//   capType string : the capability type of the scanner, vulnerability or SBOM.
	//
	// Returns:
	//   error : non nil error if any errors occurred
	Stop(ctx context.Context, artifact *artifact.Artifact) error
	Stop(ctx context.Context, artifact *artifact.Artifact, capType string) error

	// GetReport gets the reports for the given artifact identified by the digest
	//
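With the extra capType argument, callers must now say which kind of scan they want to stop. A minimal usage fragment, assuming a scan Controller value ctl, a populated *artifact.Artifact art, a request-scoped ctx, and Harbor's log package are already in hand; "vulnerability" matches the test cases above, and an SBOM stop would pass the corresponding SBOM capability string instead.

// Stop only the vulnerability scan jobs for this artifact; the controller
// filters executions by vendor type, artifact digest, and capability type.
if err := ctl.Stop(ctx, art, "vulnerability"); err != nil {
	// A BadRequest error here means no matching scan execution was found.
	log.Errorf("failed to stop scan for %s: %v", art.Digest, err)
}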
Some files were not shown because too many files have changed in this diff.