Merge branch 'main' into 19319-allow-flatenning-on-empty-namespace

This commit is contained in:
NicoForce 2024-02-28 13:41:25 +00:00 committed by GitHub
commit 8a561743e4
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
5170 changed files with 10567 additions and 1538576 deletions

View File

@ -41,10 +41,10 @@ jobs:
- ubuntu-latest - ubuntu-latest
timeout-minutes: 100 timeout-minutes: 100
steps: steps:
- name: Set up Go 1.20 - name: Set up Go 1.21
uses: actions/setup-go@v1 uses: actions/setup-go@v5
with: with:
go-version: 1.20.10 go-version: 1.21.5
id: go id: go
- uses: actions/checkout@v3 - uses: actions/checkout@v3
with: with:
@ -89,7 +89,7 @@ jobs:
bash ./tests/showtime.sh ./tests/ci/ut_run.sh $IP bash ./tests/showtime.sh ./tests/ci/ut_run.sh $IP
df -h df -h
- name: Codecov For BackEnd - name: Codecov For BackEnd
uses: codecov/codecov-action@v3 uses: codecov/codecov-action@v4
with: with:
file: ./src/github.com/goharbor/harbor/profile.cov file: ./src/github.com/goharbor/harbor/profile.cov
flags: unittests flags: unittests
@ -102,10 +102,10 @@ jobs:
- ubuntu-latest - ubuntu-latest
timeout-minutes: 100 timeout-minutes: 100
steps: steps:
- name: Set up Go 1.20 - name: Set up Go 1.21
uses: actions/setup-go@v1 uses: actions/setup-go@v5
with: with:
go-version: 1.20.10 go-version: 1.21.5
id: go id: go
- uses: actions/checkout@v3 - uses: actions/checkout@v3
with: with:
@ -157,10 +157,10 @@ jobs:
- ubuntu-latest - ubuntu-latest
timeout-minutes: 100 timeout-minutes: 100
steps: steps:
- name: Set up Go 1.20 - name: Set up Go 1.21
uses: actions/setup-go@v1 uses: actions/setup-go@v5
with: with:
go-version: 1.20.10 go-version: 1.21.5
id: go id: go
- uses: actions/checkout@v3 - uses: actions/checkout@v3
with: with:
@ -212,10 +212,10 @@ jobs:
- ubuntu-latest - ubuntu-latest
timeout-minutes: 100 timeout-minutes: 100
steps: steps:
- name: Set up Go 1.20 - name: Set up Go 1.21
uses: actions/setup-go@v1 uses: actions/setup-go@v5
with: with:
go-version: 1.20.10 go-version: 1.21.5
id: go id: go
- uses: actions/checkout@v3 - uses: actions/checkout@v3
with: with:
@ -265,10 +265,10 @@ jobs:
- ubuntu-latest - ubuntu-latest
timeout-minutes: 100 timeout-minutes: 100
steps: steps:
- name: Set up Go 1.20 - name: Set up Go 1.21
uses: actions/setup-go@v1 uses: actions/setup-go@v5
with: with:
go-version: 1.20.10 go-version: 1.21.5
id: go id: go
- uses: actions/checkout@v3 - uses: actions/checkout@v3
with: with:
@ -317,7 +317,7 @@ jobs:
- ubuntu-latest - ubuntu-latest
timeout-minutes: 100 timeout-minutes: 100
steps: steps:
- uses: actions/setup-node@v3 - uses: actions/setup-node@v4
with: with:
node-version: '18' node-version: '18'
- uses: actions/checkout@v3 - uses: actions/checkout@v3
@ -331,7 +331,7 @@ jobs:
bash ./tests/showtime.sh ./tests/ci/ui_ut_run.sh bash ./tests/showtime.sh ./tests/ci/ui_ut_run.sh
df -h df -h
- name: Codecov For UI - name: Codecov For UI
uses: codecov/codecov-action@v3 uses: codecov/codecov-action@v4
with: with:
file: ./src/github.com/goharbor/harbor/src/portal/coverage/lcov.info file: ./src/github.com/goharbor/harbor/src/portal/coverage/lcov.info
flags: unittests flags: unittests

View File

@ -13,6 +13,6 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Set the author of a PR as the assignee - name: Set the author of a PR as the assignee
uses: kentaro-m/auto-assign-action@v1.2.5 uses: kentaro-m/auto-assign-action@v2.0.0
with: with:
configuration-path: ".github/auto-assignees.yml" configuration-path: ".github/auto-assignees.yml"

View File

@ -16,17 +16,17 @@ jobs:
- ubuntu-20.04 - ubuntu-20.04
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v3
- uses: 'google-github-actions/auth@v1' - uses: 'google-github-actions/auth@v2'
with: with:
credentials_json: '${{ secrets.GCP_CREDENTIALS }}' credentials_json: '${{ secrets.GCP_CREDENTIALS }}'
- uses: google-github-actions/setup-gcloud@v1 - uses: google-github-actions/setup-gcloud@v2
with: with:
version: '430.0.0' version: '430.0.0'
- run: gcloud info - run: gcloud info
- name: Set up Go 1.20 - name: Set up Go 1.21
uses: actions/setup-go@v1 uses: actions/setup-go@v5
with: with:
go-version: 1.20.10 go-version: 1.21.5
id: go id: go
- name: Setup Docker - name: Setup Docker
uses: docker-practice/actions-setup-docker@master uses: docker-practice/actions-setup-docker@master

View File

@ -26,7 +26,7 @@ jobs:
# Initializes the CodeQL tools for scanning. # Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL - name: Initialize CodeQL
uses: github/codeql-action/init@v2 uses: github/codeql-action/init@v3
# Override language selection by uncommenting this and choosing your languages # Override language selection by uncommenting this and choosing your languages
# with: # with:
# languages: go, javascript, csharp, python, cpp, java # languages: go, javascript, csharp, python, cpp, java
@ -48,4 +48,4 @@ jobs:
# make release # make release
- name: Perform CodeQL Analysis - name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2 uses: github/codeql-action/analyze@v3

View File

@ -20,15 +20,15 @@ jobs:
- uses: actions/checkout@v3 - uses: actions/checkout@v3
- id: 'auth' - id: 'auth'
name: 'Authenticate to Google Cloud' name: 'Authenticate to Google Cloud'
uses: google-github-actions/auth@v1 uses: google-github-actions/auth@v2
with: with:
credentials_json: '${{ secrets.GCP_CREDENTIALS }}' credentials_json: '${{ secrets.GCP_CREDENTIALS }}'
- uses: google-github-actions/setup-gcloud@v1 - uses: google-github-actions/setup-gcloud@v2
- run: gcloud info - run: gcloud info
- name: Set up Go 1.20 - name: Set up Go 1.21
uses: actions/setup-go@v1 uses: actions/setup-go@v5
with: with:
go-version: 1.20.10 go-version: 1.21.5
id: go id: go
- uses: actions/checkout@v3 - uses: actions/checkout@v3
with: with:

View File

@ -7,7 +7,7 @@ jobs:
stale: stale:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/stale@v8.0.0 - uses: actions/stale@v9.0.0
with: with:
stale-issue-message: 'This issue is being marked stale due to a period of inactivity. If this issue is still relevant, please comment or remove the stale label. Otherwise, this issue will close in 30 days.' stale-issue-message: 'This issue is being marked stale due to a period of inactivity. If this issue is still relevant, please comment or remove the stale label. Otherwise, this issue will close in 30 days.'
stale-pr-message: 'This PR is being marked stale due to a period of inactivity. If this PR is still relevant, please comment or remove the stale label. Otherwise, this PR will close in 30 days.' stale-pr-message: 'This PR is being marked stale due to a period of inactivity. If this PR is still relevant, please comment or remove the stale label. Otherwise, this PR will close in 30 days.'

View File

@ -12,7 +12,7 @@ jobs:
matrix: matrix:
# maintain the versions of harbor that need to be actively # maintain the versions of harbor that need to be actively
# security scanned # security scanned
versions: [dev, v2.9.0-dev] versions: [dev, v2.10.0-dev]
# list of images that need to be scanned # list of images that need to be scanned
images: [harbor-core, harbor-db, harbor-exporter, harbor-jobservice, harbor-log, harbor-portal, harbor-registryctl, prepare] images: [harbor-core, harbor-db, harbor-exporter, harbor-jobservice, harbor-log, harbor-portal, harbor-registryctl, prepare]
permissions: permissions:
@ -32,6 +32,6 @@ jobs:
output: 'trivy-results.sarif' output: 'trivy-results.sarif'
- name: Upload Trivy scan results to GitHub Security tab - name: Upload Trivy scan results to GitHub Security tab
uses: github/codeql-action/upload-sarif@v2 uses: github/codeql-action/upload-sarif@v3
with: with:
sarif_file: 'trivy-results.sarif' sarif_file: 'trivy-results.sarif'

View File

@ -19,10 +19,10 @@ jobs:
echo "PRE_TAG=$(echo $release | jq -r '.body' | jq -r '.preTag')" >> $GITHUB_ENV echo "PRE_TAG=$(echo $release | jq -r '.body' | jq -r '.preTag')" >> $GITHUB_ENV
echo "BRANCH=$(echo $release | jq -r '.target_commitish')" >> $GITHUB_ENV echo "BRANCH=$(echo $release | jq -r '.target_commitish')" >> $GITHUB_ENV
echo "PRERELEASE=$(echo $release | jq -r '.prerelease')" >> $GITHUB_ENV echo "PRERELEASE=$(echo $release | jq -r '.prerelease')" >> $GITHUB_ENV
- uses: 'google-github-actions/auth@v1' - uses: 'google-github-actions/auth@v2'
with: with:
credentials_json: '${{ secrets.GCP_CREDENTIALS }}' credentials_json: '${{ secrets.GCP_CREDENTIALS }}'
- uses: google-github-actions/setup-gcloud@v1 - uses: google-github-actions/setup-gcloud@v2
with: with:
version: '430.0.0' version: '430.0.0'
- name: Prepare Assets - name: Prepare Assets

View File

@ -39,6 +39,7 @@ be added to this list as they transition to production deployments.
<a href="https://www.dynatrace.com/" target="_blank" border="0"><img alt="Dynatrace" src="https://raw.githubusercontent.com/goharbor/website/main/static/img/logos/users-partners/dynatrace-logo.png"></a>&nbsp; &nbsp; &nbsp; &nbsp; <a href="https://www.dynatrace.com/" target="_blank" border="0"><img alt="Dynatrace" src="https://raw.githubusercontent.com/goharbor/website/main/static/img/logos/users-partners/dynatrace-logo.png"></a>&nbsp; &nbsp; &nbsp; &nbsp;
<a href="https://www.home.cern/" target="_blank" border="0">CERN</a>&nbsp; &nbsp; &nbsp; &nbsp; <a href="https://www.home.cern/" target="_blank" border="0">CERN</a>&nbsp; &nbsp; &nbsp; &nbsp;
<a href="https://www.ns.nl/" target="_blank" border="0"><img alt="Nederlandse Spoorwegen" src="https://raw.githubusercontent.com/goharbor/website/main/docs/img/adopters/nederlandse-spoorwegen.png" height="40"></a>&nbsp; &nbsp; &nbsp; &nbsp; <a href="https://www.ns.nl/" target="_blank" border="0"><img alt="Nederlandse Spoorwegen" src="https://raw.githubusercontent.com/goharbor/website/main/docs/img/adopters/nederlandse-spoorwegen.png" height="40"></a>&nbsp; &nbsp; &nbsp; &nbsp;
<a href="https://www.de-cix.net/" target="_blank" border="0"><img alt="DE-CIX" src="https://raw.githubusercontent.com/goharbor/website/main/docs/img/adopters/de-cix.png" height="50"></a>&nbsp; &nbsp; &nbsp; &nbsp;
## Success Stories ## Success Stories
@ -88,6 +89,8 @@ feature within Harbor before deploying images into production.
and scan customized container images for different business applications, like and scan customized container images for different business applications, like
ELK stack, as part of their CI/CD pipeline. ELK stack, as part of their CI/CD pipeline.
**DE-CIX:** Harbor has been integrated into the application stack to replace the former hosted Docker registry, now known as the Distribution Registry. With Harbor, we have started separating access to project-related images using OIDC group mapping and robot accounts with dedicated permissions. Another significant benefit comes with the implemented vulnerability scanner, which makes vulnerabilities more transparent to our teams.
## Adding your logo ## Adding your logo
If you would like to add your logo here and to the `Users and Partners of Harbor` section of the website, add a PNG or SVG version of your logo to the [adopters](https://github.com/goharbor/website/tree/main/docs/img/adopters) directory of the [website](https://github.com/goharbor/website) and submit a pull request with your change. Name the image file something that reflects your company (e.g., if your company is called Acme, name the image acme.png). We will follow up and make the change in the goharbor.io website as well. If you would like to add your logo here and to the `Users and Partners of Harbor` section of the website, add a PNG or SVG version of your logo to the [adopters](https://github.com/goharbor/website/tree/main/docs/img/adopters) directory of the [website](https://github.com/goharbor/website) and submit a pull request with your change. Name the image file something that reflects your company (e.g., if your company is called Acme, name the image acme.png). We will follow up and make the change in the goharbor.io website as well.

View File

@ -134,12 +134,7 @@ The folder graph below shows the structure of the source code folder `harbor/src
│   ├── registry │   ├── registry
│   ├── router │   ├── router
│   ├── v2.0 │   ├── v2.0
├── testing # Some utilities to handle testing. └── testing # Some utilities to handle testing.
└── vendor # Go code dependencies
├── github.com
├── golang.org
├── google.golang.org
└── gopkg.in
``` ```
### Setup Development Environment ### Setup Development Environment
@ -168,12 +163,12 @@ Harbor backend is written in [Go](http://golang.org/). If you don't have a Harbo
| 2.6 | 1.18.6 | | 2.6 | 1.18.6 |
| 2.7 | 1.19.4 | | 2.7 | 1.19.4 |
| 2.8 | 1.20.6 | | 2.8 | 1.20.6 |
| 2.9 | 1.20.10 | | 2.9 | 1.21.3 |
| 2.10 | 1.21.5 |
Ensure your GOPATH and PATH have been configured in accordance with the Go environment instructions. Ensure your GOPATH and PATH have been configured in accordance with the Go environment instructions.
**Dependency Management:** Harbor uses [Go modules](https://github.com/golang/go/wiki/Modules) for dependency management of go code. The official maintainers will take the responsibility for managing the code in `vendor` directory. Please don't try to submit a PR to update the dependency code, open an issue instead. If your PR requires a change in the vendor code please make sure you discuss it with the maintainers in advance.
#### Web #### Web
Harbor web UI is built based on [Clarity](https://vmware.github.io/clarity/) and [Angular](https://angular.io/) web framework. To setup web UI development environment, please make sure the [npm](https://www.npmjs.com/get-npm) tool is installed first. Harbor web UI is built based on [Clarity](https://vmware.github.io/clarity/) and [Angular](https://angular.io/) web framework. To setup web UI development environment, please make sure the [npm](https://www.npmjs.com/get-npm) tool is installed first.
@ -258,7 +253,7 @@ go install github.com/GeertJohan/fgt@latest
#In the #working_dir/harbor, run #In the #working_dir/harbor, run
go list ./... | grep -v -E 'vendor|tests' | xargs -L1 fgt golint go list ./... | grep -v -E 'tests' | xargs -L1 fgt golint
``` ```

View File

@ -103,12 +103,12 @@ PKGVERSIONTAG=dev
PREPARE_VERSION_NAME=versions PREPARE_VERSION_NAME=versions
#versions #versions
REGISTRYVERSION=v2.8.2-patch-redis REGISTRYVERSION=v2.8.3-patch-redis
TRIVYVERSION=v0.46.0 TRIVYVERSION=v0.49.1
TRIVYADAPTERVERSION=v0.30.17 TRIVYADAPTERVERSION=v0.30.22
# version of registry for pulling the source code # version of registry for pulling the source code
REGISTRY_SRC_TAG=v2.8.2 REGISTRY_SRC_TAG=v2.8.3
# dependency binaries # dependency binaries
REGISTRYURL=https://storage.googleapis.com/harbor-builds/bin/registry/release-${REGISTRYVERSION}/registry REGISTRYURL=https://storage.googleapis.com/harbor-builds/bin/registry/release-${REGISTRYVERSION}/registry
@ -140,7 +140,7 @@ GOINSTALL=$(GOCMD) install
GOTEST=$(GOCMD) test GOTEST=$(GOCMD) test
GODEP=$(GOTEST) -i GODEP=$(GOTEST) -i
GOFMT=gofmt -w GOFMT=gofmt -w
GOBUILDIMAGE=golang:1.20.10 GOBUILDIMAGE=golang:1.21.5
GOBUILDPATHINCONTAINER=/harbor GOBUILDPATHINCONTAINER=/harbor
# go build # go build
@ -156,7 +156,7 @@ ifneq ($(GOBUILDLDFLAGS),)
endif endif
# go build command # go build command
GOIMAGEBUILDCMD=/usr/local/go/bin/go build -mod vendor GOIMAGEBUILDCMD=/usr/local/go/bin/go build
GOIMAGEBUILD_COMMON=$(GOIMAGEBUILDCMD) $(GOFLAGS) ${GOTAGS} ${GOLDFLAGS} GOIMAGEBUILD_COMMON=$(GOIMAGEBUILDCMD) $(GOFLAGS) ${GOTAGS} ${GOLDFLAGS}
GOIMAGEBUILD_CORE=$(GOIMAGEBUILDCMD) $(GOFLAGS) ${GOTAGS} --ldflags "-w -s $(CORE_LDFLAGS)" GOIMAGEBUILD_CORE=$(GOIMAGEBUILDCMD) $(GOFLAGS) ${GOTAGS} --ldflags "-w -s $(CORE_LDFLAGS)"
@ -312,7 +312,7 @@ gen_apis: lint_apis
MOCKERY_IMAGENAME=$(IMAGENAMESPACE)/mockery MOCKERY_IMAGENAME=$(IMAGENAMESPACE)/mockery
MOCKERY_VERSION=v2.22.1 MOCKERY_VERSION=v2.35.4
MOCKERY=$(RUNCONTAINER) ${MOCKERY_IMAGENAME}:${MOCKERY_VERSION} MOCKERY=$(RUNCONTAINER) ${MOCKERY_IMAGENAME}:${MOCKERY_VERSION}
MOCKERY_IMAGE_BUILD_CMD=${DOCKERBUILD} -f ${TOOLSPATH}/mockery/Dockerfile --build-arg GOLANG=${GOBUILDIMAGE} --build-arg MOCKERY_VERSION=${MOCKERY_VERSION} -t ${MOCKERY_IMAGENAME}:$(MOCKERY_VERSION) . MOCKERY_IMAGE_BUILD_CMD=${DOCKERBUILD} -f ${TOOLSPATH}/mockery/Dockerfile --build-arg GOLANG=${GOBUILDIMAGE} --build-arg MOCKERY_VERSION=${MOCKERY_VERSION} -t ${MOCKERY_IMAGENAME}:$(MOCKERY_VERSION) .
@ -452,21 +452,11 @@ package_offline: update_prepare_version compile build
@rm -rf $(HARBORPKG) @rm -rf $(HARBORPKG)
@echo "Done." @echo "Done."
gosec:
#go get github.com/securego/gosec/cmd/gosec
#go get github.com/dghubble/sling
@echo "run secure go scan ..."
@if [ "$(GOSECRESULTS)" != "" ] ; then \
$(GOPATH)/bin/gosec -fmt=json -out=$(GOSECRESULTS) -quiet ./... | true ; \
else \
$(GOPATH)/bin/gosec -fmt=json -out=harbor_gas_output.json -quiet ./... | true ; \
fi
go_check: gen_apis mocks_check misspell commentfmt lint go_check: gen_apis mocks_check misspell commentfmt lint
commentfmt: commentfmt:
@echo checking comment format... @echo checking comment format...
@res=$$(find . -type d \( -path ./src/vendor -o -path ./tests \) -prune -o -name '*.go' -print | xargs egrep '(^|\s)\/\/(\S)'|grep -v '//go:generate'); \ @res=$$(find . -type d \( -path ./tests \) -prune -o -name '*.go' -print | xargs egrep '(^|\s)\/\/(\S)'|grep -v '//go:generate'); \
if [ -n "$${res}" ]; then \ if [ -n "$${res}" ]; then \
echo checking comment format fail.. ; \ echo checking comment format fail.. ; \
echo missing whitespace between // and comment body;\ echo missing whitespace between // and comment body;\
@ -476,10 +466,10 @@ commentfmt:
misspell: misspell:
@echo checking misspell... @echo checking misspell...
@find . -type d \( -path ./src/vendor -o -path ./tests \) -prune -o -name '*.go' -print | xargs misspell -error @find . -type d \( -path ./tests \) -prune -o -name '*.go' -print | xargs misspell -error
# golangci-lint binary installation or refer to https://golangci-lint.run/usage/install/#local-installation # golangci-lint binary installation or refer to https://golangci-lint.run/usage/install/#local-installation
# curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.51.2 # curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.55.2
GOLANGCI_LINT := $(shell go env GOPATH)/bin/golangci-lint GOLANGCI_LINT := $(shell go env GOPATH)/bin/golangci-lint
lint: lint:
@echo checking lint @echo checking lint

View File

@ -33,8 +33,8 @@ Harbor is hosted by the [Cloud Native Computing Foundation](https://cncf.io) (CN
* **Role based access control**: Users access different repositories through 'projects' and a user can have different permission for images or Helm charts under a project. * **Role based access control**: Users access different repositories through 'projects' and a user can have different permission for images or Helm charts under a project.
* **Policy based replication**: Images and charts can be replicated (synchronized) between multiple registry instances based on policies with using filters (repository, tag and label). Harbor automatically retries a replication if it encounters any errors. This can be used to assist loadbalancing, achieve high availability, and facilitate multi-datacenter deployments in hybrid and multi-cloud scenarios. * **Policy based replication**: Images and charts can be replicated (synchronized) between multiple registry instances based on policies with using filters (repository, tag and label). Harbor automatically retries a replication if it encounters any errors. This can be used to assist loadbalancing, achieve high availability, and facilitate multi-datacenter deployments in hybrid and multi-cloud scenarios.
* **Vulnerability Scanning**: Harbor scans images regularly for vulnerabilities and has policy checks to prevent vulnerable images from being deployed. * **Vulnerability Scanning**: Harbor scans images regularly for vulnerabilities and has policy checks to prevent vulnerable images from being deployed.
* **LDAP/AD support**: Harbor integrates with existing enterprise LDAP/AD for user authentication and management, and supports importing LDAP groups into Harbor that can then be given permissions to specific projects. * **LDAP/AD support**: Harbor integrates with existing enterprise LDAP/AD for user authentication and management, and supports importing LDAP groups into Harbor that can then be given permissions to specific projects.
* **OIDC support**: Harbor leverages OpenID Connect (OIDC) to verify the identity of users authenticated by an external authorization server or identity provider. Single sign-on can be enabled to log into the Harbor portal. * **OIDC support**: Harbor leverages OpenID Connect (OIDC) to verify the identity of users authenticated by an external authorization server or identity provider. Single sign-on can be enabled to log into the Harbor portal.
* **Image deletion & garbage collection**: System admin can run garbage collection jobs so that images(dangling manifests and unreferenced blobs) can be deleted and their space can be freed up periodically. * **Image deletion & garbage collection**: System admin can run garbage collection jobs so that images(dangling manifests and unreferenced blobs) can be deleted and their space can be freed up periodically.
* **Notary**: Support signing container images using Docker Content Trust (leveraging Notary) for guaranteeing authenticity and provenance. In addition, policies that prevent unsigned images from being deployed can also be activated. * **Notary**: Support signing container images using Docker Content Trust (leveraging Notary) for guaranteeing authenticity and provenance. In addition, policies that prevent unsigned images from being deployed can also be activated.
* **Graphical user portal**: User can easily browse, search repositories and manage projects. * **Graphical user portal**: User can easily browse, search repositories and manage projects.
@ -55,7 +55,7 @@ For learning the architecture design of Harbor, check the document [Architecture
**System requirements:** **System requirements:**
**On a Linux host:** docker 17.06.0-ce+ and docker-compose 1.18.0+ . **On a Linux host:** docker 20.10.10-ce+ and docker-compose 1.18.0+ .
Download binaries of **[Harbor release ](https://github.com/vmware/harbor/releases)** and follow **[Installation & Configuration Guide](https://goharbor.io/docs/latest/install-config/)** to install Harbor. Download binaries of **[Harbor release ](https://github.com/vmware/harbor/releases)** and follow **[Installation & Configuration Guide](https://goharbor.io/docs/latest/install-config/)** to install Harbor.
@ -77,8 +77,8 @@ The [compatibility list](https://goharbor.io/docs/edge/install-config/harbor-com
## Community ## Community
* **Twitter:** [@project_harbor](https://twitter.com/project_harbor) * **Twitter:** [@project_harbor](https://twitter.com/project_harbor)
* **User Group:** Join Harbor user email group: [harbor-users@lists.cncf.io](https://lists.cncf.io/g/harbor-users) to get update of Harbor's news, features, releases, or to provide suggestion and feedback. * **User Group:** Join Harbor user email group: [harbor-users@lists.cncf.io](https://lists.cncf.io/g/harbor-users) to get update of Harbor's news, features, releases, or to provide suggestion and feedback.
* **Developer Group:** Join Harbor developer group: [harbor-dev@lists.cncf.io](https://lists.cncf.io/g/harbor-dev) for discussion on Harbor development and contribution. * **Developer Group:** Join Harbor developer group: [harbor-dev@lists.cncf.io](https://lists.cncf.io/g/harbor-dev) for discussion on Harbor development and contribution.
* **Slack:** Join Harbor's community for discussion and ask questions: [Cloud Native Computing Foundation](https://slack.cncf.io/), channel: [#harbor](https://cloud-native.slack.com/messages/harbor/) and [#harbor-dev](https://cloud-native.slack.com/messages/harbor-dev/) * **Slack:** Join Harbor's community for discussion and ask questions: [Cloud Native Computing Foundation](https://slack.cncf.io/), channel: [#harbor](https://cloud-native.slack.com/messages/harbor/) and [#harbor-dev](https://cloud-native.slack.com/messages/harbor-dev/)

View File

@ -14,11 +14,11 @@ Patch releases are based on the major/minor release branch, the release cadence
`Pre-releases: mainly the different RC builds` will be compiled from their corresponding branches. Please note they are done to assist in the stabilization process, no guarantees are provided. `Pre-releases: mainly the different RC builds` will be compiled from their corresponding branches. Please note they are done to assist in the stabilization process, no guarantees are provided.
### Minor Release Support Matrix ### Minor Release Support Matrix
| Version | Supported | | Version | Supported |
|---------------| ------------------ | |----------------| ------------------ |
| Harbor v2.9.x | :white_check_mark: | | Harbor v2.10.x | :white_check_mark: |
| Harbor v2.8.x | :white_check_mark: | | Harbor v2.9.x | :white_check_mark: |
| Harbor v2.7.x | :white_check_mark: | | Harbor v2.8.x | :white_check_mark: |
### Upgrade path and support policy ### Upgrade path and support policy
The upgrade path for Harbor is (1) 2.2.x patch releases are always compatible with its major and minor version. For example, previous released 2.2.x can be upgraded to most recent 2.2.3 release. (2) Harbor only supports two previous minor releases to upgrade to current minor release. For example, 2.3.0 will only support 2.1.0 and 2.2.0 to upgrade from, 2.0.0 to 2.3.0 is not supported. One should upgrade to 2.2.0 first, then to 2.3.0. The upgrade path for Harbor is (1) 2.2.x patch releases are always compatible with its major and minor version. For example, previous released 2.2.x can be upgraded to most recent 2.2.3 release. (2) Harbor only supports two previous minor releases to upgrade to current minor release. For example, 2.3.0 will only support 2.1.0 and 2.2.0 to upgrade from, 2.0.0 to 2.3.0 is not supported. One should upgrade to 2.2.0 first, then to 2.3.0.

View File

@ -1 +1 @@
v2.10.0 v2.11.0

View File

@ -4719,7 +4719,7 @@ paths:
summary: Get job log by job id summary: Get job log by job id
description: Get job log by job id, it is only used by administrator description: Get job log by job id, it is only used by administrator
produces: produces:
- text/plain - text/plain
tags: tags:
- jobservice - jobservice
parameters: parameters:
@ -4850,7 +4850,6 @@ paths:
'200': '200':
description: Get scheduler status successfully. description: Get scheduler status successfully.
schema: schema:
type: object
$ref: '#/definitions/SchedulerStatus' $ref: '#/definitions/SchedulerStatus'
'401': '401':
$ref: '#/responses/401' $ref: '#/responses/401'
@ -6072,7 +6071,7 @@ paths:
description: Specify whether the dangerous Artifact are included inside summary information description: Specify whether the dangerous Artifact are included inside summary information
type: boolean type: boolean
required: false required: false
default: false default: false
responses: responses:
'200': '200':
description: Success description: Success
@ -6091,15 +6090,15 @@ paths:
get: get:
summary: Get the vulnerability list. summary: Get the vulnerability list.
description: | description: |
Get the vulnerability list. use q to pass the query condition, Get the vulnerability list. use q to pass the query condition,
supported conditions: supported conditions:
cve_id(exact match) cve_id(exact match)
cvss_score_v3(range condition) cvss_score_v3(range condition)
severity(exact match) severity(exact match)
repository_name(exact match) repository_name(exact match)
project_id(exact match) project_id(exact match)
package(exact match) package(exact match)
tag(exact match) tag(exact match)
digest(exact match) digest(exact match)
tags: tags:
- securityhub - securityhub
@ -6140,7 +6139,31 @@ paths:
'401': '401':
$ref: '#/responses/401' $ref: '#/responses/401'
'500': '500':
$ref: '#/responses/500' $ref: '#/responses/500'
/permissions:
get:
summary: Get system or project level permissions info.
operationId: getPermissions
description: |
This endpoint is for retrieving resource and action info that only provides for admin user(system admin and project admin).
tags:
- permissions
parameters:
- $ref: '#/parameters/requestId'
responses:
'200':
description: Get permissions successfully.
schema:
$ref: '#/definitions/Permissions'
'401':
$ref: '#/responses/401'
'403':
$ref: '#/responses/403'
'404':
$ref: '#/responses/404'
'500':
$ref: '#/responses/500'
parameters: parameters:
query: query:
@ -6151,7 +6174,7 @@ parameters:
required: false required: false
sort: sort:
name: sort name: sort
description: Sort the resource list in ascending or descending order. e.g. sort by field1 in ascending orderr and field2 in descending order with "sort=field1,-field2" description: Sort the resource list in ascending or descending order. e.g. sort by field1 in ascending order and field2 in descending order with "sort=field1,-field2"
in: query in: query
type: string type: string
required: false required: false
@ -6216,7 +6239,7 @@ parameters:
repositoryName: repositoryName:
name: repository_name name: repository_name
in: path in: path
description: The name of the repository. If it contains slash, encode it with URL encoding. e.g. a/b -> a%252Fb description: The name of the repository. If it contains slash, encode it twice over with URL encoding. e.g. a/b -> a%2Fb -> a%252Fb
required: true required: true
type: string type: string
reference: reference:
@ -7141,6 +7164,10 @@ definitions:
type: string type: string
description: 'Whether scan images automatically when pushing. The valid values are "true", "false".' description: 'Whether scan images automatically when pushing. The valid values are "true", "false".'
x-nullable: true x-nullable: true
auto_sbom_generation:
type: string
description: 'Whether generating SBOM automatically when pushing a subject artifact. The valid values are "true", "false".'
x-nullable: true
reuse_sys_cve_allowlist: reuse_sys_cve_allowlist:
type: string type: string
description: 'Whether this project reuse the system level CVE allowlist as the allowlist of its own. The valid values are "true", "false". description: 'Whether this project reuse the system level CVE allowlist as the allowlist of its own. The valid values are "true", "false".
@ -7633,8 +7660,9 @@ definitions:
description: The level of the robot, project or system description: The level of the robot, project or system
duration: duration:
type: integer type: integer
x-nullable: true
format: int64 format: int64
description: The duration of the robot in days description: The duration of the robot in days, duration must be either -1(Never) or a positive integer
editable: editable:
type: boolean type: boolean
x-omitempty: false x-omitempty: false
@ -7681,7 +7709,7 @@ definitions:
duration: duration:
type: integer type: integer
format: int64 format: int64
description: The duration of the robot in days description: The duration of the robot in days, duration must be either -1(Never) or a positive integer
permissions: permissions:
type: array type: array
items: items:
@ -7843,6 +7871,11 @@ definitions:
x-nullable: true x-nullable: true
x-omitempty: true x-omitempty: true
$ref: '#/definitions/AuthproxySetting' $ref: '#/definitions/AuthproxySetting'
oidc_provider_name:
type: string
x-nullable: true
x-omitempty: true
description: The OIDC provider name, empty if current auth is not OIDC_auth or OIDC provider is not configured.
AuthproxySetting: AuthproxySetting:
type: object type: object
properties: properties:
@ -7966,7 +7999,7 @@ definitions:
type: string type: string
description: | description: |
The schedule type. The valid values are 'Hourly', 'Daily', 'Weekly', 'Custom', 'Manual', 'None' and 'Schedule'. The schedule type. The valid values are 'Hourly', 'Daily', 'Weekly', 'Custom', 'Manual', 'None' and 'Schedule'.
'Manual' means to trigger it right away, 'Schedule' means to trigger it by a specified cron schedule and 'Manual' means to trigger it right away, 'Schedule' means to trigger it by a specified cron schedule and
'None' means to cancel the schedule. 'None' means to cancel the schedule.
enum: enum:
- Hourly - Hourly
@ -9397,6 +9430,19 @@ definitions:
action: action:
type: string type: string
description: The permission action description: The permission action
Permissions:
type: object
properties:
system:
type: array
description: The system level permissions
items:
$ref: '#/definitions/Permission'
project:
type: array
description: The project level permissions
items:
$ref: '#/definitions/Permission'
OIDCCliSecretReq: OIDCCliSecretReq:
type: object type: object
properties: properties:
@ -9772,12 +9818,12 @@ definitions:
type: object type: object
description: the dangerous CVE information description: the dangerous CVE information
properties: properties:
cve_id: cve_id:
type: string type: string
description: the cve id description: the cve id
severity: severity:
type: string type: string
description: the severity of the CVE description: the severity of the CVE
cvss_score_v3: cvss_score_v3:
type: number type: number
format: float64 format: float64
@ -9787,7 +9833,7 @@ definitions:
description: the description of the CVE description: the description of the CVE
package: package:
type: string type: string
description: the package of the CVE description: the package of the CVE
version: version:
type: string type: string
description: the version of the package description: the version of the package
@ -9795,14 +9841,14 @@ definitions:
type: object type: object
description: the dangerous artifact information description: the dangerous artifact information
properties: properties:
project_id: project_id:
type: integer type: integer
format: int64 format: int64
description: the project id of the artifact description: the project id of the artifact
repository_name: repository_name:
type: string type: string
description: the repository name of the artifact description: the repository name of the artifact
digest: digest:
type: string type: string
description: the digest of the artifact description: the digest of the artifact
critical_cnt: critical_cnt:
@ -9862,6 +9908,6 @@ definitions:
description: The description of the vulnerability description: The description of the vulnerability
links: links:
type: array type: array
items: items:
type: string type: string
description: Links of the vulnerability description: Links of the vulnerability

View File

@ -1,5 +1,5 @@
#!/bin/bash #!/bin/bash
#docker version: 17.06.0+ #docker version: 20.10.10+
#docker-compose version: 1.18.0+ #docker-compose version: 1.18.0+
#golang version: 1.12.0+ #golang version: 1.12.0+
@ -78,7 +78,7 @@ function check_golang {
function check_docker { function check_docker {
if ! docker --version &> /dev/null if ! docker --version &> /dev/null
then then
error "Need to install docker(17.06.0+) first and run this script again." error "Need to install docker(20.10.10+) first and run this script again."
exit 1 exit 1
fi fi
@ -93,7 +93,7 @@ function check_docker {
# the version of docker does not meet the requirement # the version of docker does not meet the requirement
if [ "$docker_version_part1" -lt 17 ] || ([ "$docker_version_part1" -eq 17 ] && [ "$docker_version_part2" -lt 6 ]) if [ "$docker_version_part1" -lt 17 ] || ([ "$docker_version_part1" -eq 17 ] && [ "$docker_version_part2" -lt 6 ])
then then
error "Need to upgrade docker package to 17.06.0+." error "Need to upgrade docker package to 20.10.10+."
exit 1 exit 1
fi fi
else else

View File

@ -16,6 +16,18 @@ https:
# The path of cert and key files for nginx # The path of cert and key files for nginx
certificate: /your/certificate/path certificate: /your/certificate/path
private_key: /your/private/key/path private_key: /your/private/key/path
# enable strong ssl ciphers (default: false)
# strong_ssl_ciphers: false
# # Harbor will set ipv4 enabled only by default if this block is not configured
# # Otherwise, please uncomment this block to configure your own ip_family stacks
# ip_family:
# # ipv6Enabled set to true if ipv6 is enabled in docker network, currently it affects the nginx related component
# ipv6:
# enabled: false
# # ipv4Enabled set to true by default, currently it affects the nginx related component
# ipv4:
# enabled: true
# # Uncomment following will enable tls communication between all harbor components # # Uncomment following will enable tls communication between all harbor components
# internal_tls: # internal_tls:
@ -23,8 +35,7 @@ https:
# enabled: true # enabled: true
# # put your cert and key files on dir # # put your cert and key files on dir
# dir: /etc/harbor/tls/internal # dir: /etc/harbor/tls/internal
# # enable strong ssl ciphers (default: false)
# strong_ssl_ciphers: false
# Uncomment external_url if you want to enable external proxy # Uncomment external_url if you want to enable external proxy
# And when it enabled the hostname will no longer used # And when it enabled the hostname will no longer used
@ -62,7 +73,8 @@ data_volume: /data
# ca_bundle: # ca_bundle:
# # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss # # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss
# # for more info about this configuration please refer https://docs.docker.com/registry/configuration/ # # for more info about this configuration please refer https://distribution.github.io/distribution/about/configuration/
# # and https://distribution.github.io/distribution/storage-drivers/
# filesystem: # filesystem:
# maxthreads: 100 # maxthreads: 100
# # set disable to true when you want to disable registry redirect # # set disable to true when you want to disable registry redirect
@ -86,6 +98,10 @@ trivy:
# `metadata.json` files and mount them in the `/home/scanner/.cache/trivy/db` path. # `metadata.json` files and mount them in the `/home/scanner/.cache/trivy/db` path.
skip_update: false skip_update: false
# #
# skipJavaDBUpdate If the flag is enabled you have to manually download the `trivy-java.db` file and mount it in the
# `/home/scanner/.cache/trivy/java-db/trivy-java.db` path
skip_java_db_update: false
#
# The offline_scan option prevents Trivy from sending API requests to identify dependencies. # The offline_scan option prevents Trivy from sending API requests to identify dependencies.
# Scanning JAR files and pom.xml may require Internet access for better detection, but this option tries to avoid it. # Scanning JAR files and pom.xml may require Internet access for better detection, but this option tries to avoid it.
# For example, the offline mode will not try to resolve transitive dependencies in pom.xml when the dependency doesn't # For example, the offline mode will not try to resolve transitive dependencies in pom.xml when the dependency doesn't
@ -153,7 +169,7 @@ log:
# port: 5140 # port: 5140
#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY! #This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
_version: 2.9.0 _version: 2.10.0
# Uncomment external_database if using external database. # Uncomment external_database if using external database.
# external_database: # external_database:
@ -167,17 +183,6 @@ _version: 2.9.0
# max_idle_conns: 2 # max_idle_conns: 2
# max_open_conns: 0 # max_open_conns: 0
# Uncomment redis if need to customize redis db
# redis:
# # db_index 0 is for core, it's unchangeable
# # registry_db_index: 1
# # jobservice_db_index: 2
# # trivy_db_index: 5
# # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
# # harbor_db_index: 6
# # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
# # cache_db_index: 7
# Uncomment redis if need to customize redis db # Uncomment redis if need to customize redis db
# redis: # redis:
# # db_index 0 is for core, it's unchangeable # # db_index 0 is for core, it's unchangeable
@ -303,4 +308,4 @@ cache:
# # By redis will bring up some delay for quota usage updation for display, so only # # By redis will bring up some delay for quota usage updation for display, so only
# # suggest switch provider to redis if you were ran into the db connections spike aroud # # suggest switch provider to redis if you were ran into the db connections spike aroud
# # the scenario of high concurrent pushing to same project, no improvment for other scenes. # # the scenario of high concurrent pushing to same project, no improvment for other scenes.
# quota_update_provider: redis # Or db # quota_update_provider: redis # Or db

View File

@ -1 +1,3 @@
DROP TABLE IF EXISTS harbor_resource_label; DROP TABLE IF EXISTS harbor_resource_label;
CREATE INDEX IF NOT EXISTS idx_artifact_accessory_subject_artifact_id ON artifact_accessory (subject_artifact_id);

View File

@ -12,7 +12,7 @@ COPY ./make/photon/db/initial-registry.sql /docker-entrypoint-initdb.d/
RUN chown -R postgres:postgres /docker-entrypoint.sh /docker-healthcheck.sh /docker-entrypoint-initdb.d \ RUN chown -R postgres:postgres /docker-entrypoint.sh /docker-healthcheck.sh /docker-entrypoint-initdb.d \
&& chmod u+x /docker-entrypoint.sh /docker-healthcheck.sh && chmod u+x /docker-entrypoint.sh /docker-healthcheck.sh
ENTRYPOINT ["/docker-entrypoint.sh", "13", "14"] ENTRYPOINT ["/docker-entrypoint.sh", "14", "15"]
HEALTHCHECK CMD ["/docker-healthcheck.sh"] HEALTHCHECK CMD ["/docker-healthcheck.sh"]
USER postgres USER postgres

View File

@ -6,15 +6,15 @@ RUN tdnf install -y shadow >> /dev/null \
&& groupadd -r postgres --gid=999 \ && groupadd -r postgres --gid=999 \
&& useradd -m -r -g postgres --uid=999 postgres && useradd -m -r -g postgres --uid=999 postgres
RUN tdnf install -y postgresql13-server >> /dev/null RUN tdnf install -y postgresql14-server >> /dev/null
RUN tdnf install -y gzip postgresql14-server findutils bc >> /dev/null \ RUN tdnf install -y gzip postgresql15-server findutils bc >> /dev/null \
&& mkdir -p /docker-entrypoint-initdb.d \ && mkdir -p /docker-entrypoint-initdb.d \
&& mkdir -p /run/postgresql \ && mkdir -p /run/postgresql \
&& chown -R postgres:postgres /run/postgresql \ && chown -R postgres:postgres /run/postgresql \
&& chmod 2777 /run/postgresql \ && chmod 2777 /run/postgresql \
&& mkdir -p "$PGDATA" && chown -R postgres:postgres "$PGDATA" && chmod 777 "$PGDATA" \ && mkdir -p "$PGDATA" && chown -R postgres:postgres "$PGDATA" && chmod 777 "$PGDATA" \
&& sed -i "s|#listen_addresses = 'localhost'.*|listen_addresses = '*'|g" /usr/pgsql/14/share/postgresql/postgresql.conf.sample \ && sed -i "s|#listen_addresses = 'localhost'.*|listen_addresses = '*'|g" /usr/pgsql/15/share/postgresql/postgresql.conf.sample \
&& sed -i "s|#unix_socket_directories = '/tmp'.*|unix_socket_directories = '/run/postgresql'|g" /usr/pgsql/14/share/postgresql/postgresql.conf.sample \ && sed -i "s|#unix_socket_directories = '/tmp'.*|unix_socket_directories = '/run/postgresql'|g" /usr/pgsql/15/share/postgresql/postgresql.conf.sample \
&& tdnf clean all && tdnf clean all
RUN tdnf erase -y toybox && tdnf install -y util-linux net-tools RUN tdnf erase -y toybox && tdnf install -y util-linux net-tools

View File

@ -10,7 +10,7 @@ from migrations import accept_versions
@click.command() @click.command()
@click.option('-i', '--input', 'input_', required=True, help="The path of original config file") @click.option('-i', '--input', 'input_', required=True, help="The path of original config file")
@click.option('-o', '--output', default='', help="the path of output config file") @click.option('-o', '--output', default='', help="the path of output config file")
@click.option('-t', '--target', default='2.9.0', help="target version of input path") @click.option('-t', '--target', default='2.10.0', help="target version of input path")
def migrate(input_, output, target): def migrate(input_, output, target):
""" """
migrate command will migrate config file style to specific version migrate command will migrate config file style to specific version

View File

@ -2,4 +2,4 @@ import os
MIGRATION_BASE_DIR = os.path.dirname(__file__) MIGRATION_BASE_DIR = os.path.dirname(__file__)
accept_versions = {'1.9.0', '1.10.0', '2.0.0', '2.1.0', '2.2.0', '2.3.0', '2.4.0', '2.5.0', '2.6.0', '2.7.0', '2.8.0', '2.9.0'} accept_versions = {'1.9.0', '1.10.0', '2.0.0', '2.1.0', '2.2.0', '2.3.0', '2.4.0', '2.5.0', '2.6.0', '2.7.0', '2.8.0', '2.9.0','2.10.0'}

View File

@ -0,0 +1,21 @@
import os
from jinja2 import Environment, FileSystemLoader, StrictUndefined, select_autoescape
from utils.migration import read_conf
revision = '2.10.0'
down_revisions = ['2.9.0']
def migrate(input_cfg, output_cfg):
    """Render the 2.10.0 harbor.yml.jinja template using the values read from
    input_cfg and write the resulting configuration file to output_cfg."""
    template_dir = os.path.dirname(__file__)
    # StrictUndefined makes rendering fail loudly on any missing config key.
    env = Environment(
        loader=FileSystemLoader(template_dir),
        undefined=StrictUndefined,
        trim_blocks=True,
        lstrip_blocks=True,
        autoescape=select_autoescape(),
    )
    template = env.get_template('harbor.yml.jinja')
    rendered = template.render(**read_conf(input_cfg))
    with open(output_cfg, 'w') as out_file:
        out_file.write(rendered)

View File

@ -0,0 +1,668 @@
# Configuration file of Harbor
# The IP address or hostname to access admin UI and registry service.
# DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
hostname: {{ hostname }}
# http related config
{% if http is defined %}
http:
# port for http, default is 80. If https enabled, this port will redirect to https port
port: {{ http.port }}
{% else %}
# http:
# # port for http, default is 80. If https enabled, this port will redirect to https port
# port: 80
{% endif %}
{% if https is defined %}
# https related config
https:
# https port for harbor, default is 443
port: {{ https.port }}
# The path of cert and key files for nginx
certificate: {{ https.certificate }}
private_key: {{ https.private_key }}
# enable strong ssl ciphers (default: false)
{% if strong_ssl_ciphers is defined %}
strong_ssl_ciphers: {{ strong_ssl_ciphers | lower }}
{% else %}
strong_ssl_ciphers: false
{% endif %}
{% else %}
# https related config
# https:
# # https port for harbor, default is 443
# port: 443
# # The path of cert and key files for nginx
# certificate: /your/certificate/path
# private_key: /your/private/key/path
# enable strong ssl ciphers (default: false)
# strong_ssl_ciphers: false
{% endif %}
{% if internal_tls is defined %}
# Uncomment following will enable tls communication between all harbor components
internal_tls:
# set enabled to true means internal tls is enabled
enabled: {{ internal_tls.enabled | lower }}
{% if internal_tls.dir is defined %}
# put your cert and key files on dir
dir: {{ internal_tls.dir }}
{% endif %}
{% else %}
# internal_tls:
# # set enabled to true means internal tls is enabled
# enabled: true
# # put your cert and key files on dir
# dir: /etc/harbor/tls/internal
{% endif %}
# Uncomment external_url if you want to enable external proxy
# And when it enabled the hostname will no longer used
{% if external_url is defined %}
external_url: {{ external_url }}
{% else %}
# external_url: https://reg.mydomain.com:8433
{% endif %}
# The initial password of Harbor admin
# It only works in first time to install harbor
# Remember Change the admin password from UI after launching Harbor.
{% if harbor_admin_password is defined %}
harbor_admin_password: {{ harbor_admin_password }}
{% else %}
harbor_admin_password: Harbor12345
{% endif %}
# Harbor DB configuration
database:
{% if database is defined %}
# The password for the root user of Harbor DB. Change this before any production use.
password: {{ database.password}}
# The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
max_idle_conns: {{ database.max_idle_conns }}
# The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
# Note: the default number of connections is 1024 for postgres of harbor.
max_open_conns: {{ database.max_open_conns }}
{% else %}
# The password for the root user of Harbor DB. Change this before any production use.
password: root123
# The maximum number of connections in the idle connection pool. If it <=0, no idle connections are retained.
max_idle_conns: 100
# The maximum number of open connections to the database. If it <= 0, then there is no limit on the number of open connections.
# Note: the default number of connections is 1024 for postgres of harbor.
max_open_conns: 900
{% endif %}
{% if data_volume is defined %}
# The default data volume
data_volume: {{ data_volume }}
{% else %}
# The default data volume
data_volume: /data
{% endif %}
# Harbor Storage settings by default is using /data dir on local filesystem
# Uncomment storage_service setting If you want to using external storage
{% if storage_service is defined %}
storage_service:
{% for key, value in storage_service.items() %}
{% if key == 'ca_bundle' %}
# # ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
# # of registry's and chart repository's containers. This is usually needed when the user hosts a internal storage with self signed certificate.
ca_bundle: {{ value if value is not none else '' }}
{% elif key == 'redirect' %}
# # set disable to true when you want to disable registry redirect
redirect:
{% if storage_service.redirect.disabled is defined %}
disable: {{ storage_service.redirect.disabled | lower}}
{% else %}
disable: {{ storage_service.redirect.disable | lower}}
{% endif %}
{% else %}
# # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss
# # for more info about this configuration please refer https://distribution.github.io/distribution/about/configuration/
{{ key }}:
{% for k, v in value.items() %}
{{ k }}: {{ v if v is not none else '' }}
{% endfor %}
{% endif %}
{% endfor %}
{% else %}
# Harbor Storage settings by default is using /data dir on local filesystem
# Uncomment storage_service setting If you want to using external storage
# storage_service:
# # ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
# # of registry's and chart repository's containers. This is usually needed when the user hosts a internal storage with self signed certificate.
# ca_bundle:
# # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss
# # for more info about this configuration please refer https://distribution.github.io/distribution/about/configuration/
# filesystem:
# maxthreads: 100
# # set disable to true when you want to disable registry redirect
# redirect:
# disable: false
{% endif %}
# Trivy configuration
#
# Trivy DB contains vulnerability information from NVD, Red Hat, and many other upstream vulnerability databases.
# It is downloaded by Trivy from the GitHub release page https://github.com/aquasecurity/trivy-db/releases and cached
# in the local file system. In addition, the database contains the update timestamp so Trivy can detect whether it
# should download a newer version from the Internet or use the cached one. Currently, the database is updated every
# 12 hours and published as a new release to GitHub.
{% if trivy is defined %}
trivy:
# ignoreUnfixed The flag to display only fixed vulnerabilities
{% if trivy.ignore_unfixed is defined %}
ignore_unfixed: {{ trivy.ignore_unfixed | lower }}
{% else %}
ignore_unfixed: false
{% endif %}
# timeout The duration to wait for scan completion
{% if trivy.timeout is defined %}
timeout: {{ trivy.timeout }}
{% else %}
timeout: 5m0s
{% endif %}
# skipUpdate The flag to enable or disable Trivy DB downloads from GitHub
#
# You might want to enable this flag in test or CI/CD environments to avoid GitHub rate limiting issues.
# If the flag is enabled you have to download the `trivy-offline.tar.gz` archive manually, extract `trivy.db` and
# `metadata.json` files and mount them in the `/home/scanner/.cache/trivy/db` path.
{% if trivy.skip_update is defined %}
skip_update: {{ trivy.skip_update | lower }}
{% else %}
skip_update: false
{% endif %}
#
{% if trivy.offline_scan is defined %}
offline_scan: {{ trivy.offline_scan | lower }}
{% else %}
offline_scan: false
{% endif %}
#
# Comma-separated list of what security issues to detect. Possible values are `vuln`, `config` and `secret`. Defaults to `vuln`.
{% if trivy.security_check is defined %}
security_check: {{ trivy.security_check }}
{% else %}
security_check: vuln
{% endif %}
#
# insecure The flag to skip verifying registry certificate
{% if trivy.insecure is defined %}
insecure: {{ trivy.insecure | lower }}
{% else %}
insecure: false
{% endif %}
# github_token The GitHub access token to download Trivy DB
#
# Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
# for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
# requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
# https://developer.github.com/v3/#rate-limiting
#
# You can create a GitHub token by following the instructions in
# https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
#
{% if trivy.github_token is defined %}
github_token: {{ trivy.github_token }}
{% else %}
# github_token: xxx
{% endif %}
{% else %}
# trivy:
# # ignoreUnfixed The flag to display only fixed vulnerabilities
# ignore_unfixed: false
# # skipUpdate The flag to enable or disable Trivy DB downloads from GitHub
# #
# # You might want to enable this flag in test or CI/CD environments to avoid GitHub rate limiting issues.
# # If the flag is enabled you have to download the `trivy-offline.tar.gz` archive manually, extract `trivy.db` and
# # `metadata.json` files and mount them in the `/home/scanner/.cache/trivy/db` path.
# skip_update: false
# #
# #The offline_scan option prevents Trivy from sending API requests to identify dependencies.
# # Scanning JAR files and pom.xml may require Internet access for better detection, but this option tries to avoid it.
# # For example, the offline mode will not try to resolve transitive dependencies in pom.xml when the dependency doesn't
# # exist in the local repositories. It means a number of detected vulnerabilities might be fewer in offline mode.
# # It would work if all the dependencies are in local.
# # This option doesn't affect DB download. You need to specify "skip-update" as well as "offline-scan" in an air-gapped environment.
# offline_scan: false
# #
# # insecure The flag to skip verifying registry certificate
# insecure: false
# # github_token The GitHub access token to download Trivy DB
# #
# # Anonymous downloads from GitHub are subject to the limit of 60 requests per hour. Normally such rate limit is enough
# # for production operations. If, for any reason, it's not enough, you could increase the rate limit to 5000
# # requests per hour by specifying the GitHub access token. For more details on GitHub rate limiting please consult
# # https://developer.github.com/v3/#rate-limiting
# #
# # You can create a GitHub token by following the instructions in
# # https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line
# #
# # github_token: xxx
{% endif %}
jobservice:
# Maximum number of job workers in job service
{% if jobservice is defined %}
max_job_workers: {{ jobservice.max_job_workers }}
# The jobLoggers backend name, only support "STD_OUTPUT", "FILE" and/or "DB"
{% if jobservice.job_loggers is defined %}
job_loggers:
{% for job_logger in jobservice.job_loggers %}
- {{job_logger}}
{% endfor %}
{% else %}
job_loggers:
- STD_OUTPUT
- FILE
# - DB
{% endif %}
# The jobLogger sweeper duration (ignored if `jobLogger` is `stdout`)
{% if jobservice.logger_sweeper_duration is defined %}
logger_sweeper_duration: {{ jobservice.logger_sweeper_duration }}
{% else %}
logger_sweeper_duration: 1
{% endif %}
{% else %}
max_job_workers: 10
# The jobLoggers backend name, only support "STD_OUTPUT", "FILE" and/or "DB"
job_loggers:
- STD_OUTPUT
- FILE
# - DB
# The jobLogger sweeper duration (ignored if `jobLogger` is `stdout`)
logger_sweeper_duration: 1
{% endif %}
notification:
# Maximum retry count for webhook job
{% if notification is defined %}
webhook_job_max_retry: {{ notification.webhook_job_max_retry}}
# HTTP client timeout for webhook job
{% if notification.webhook_job_http_client_timeout is defined %}
webhook_job_http_client_timeout: {{ notification.webhook_job_http_client_timeout }}
{% else %}
webhook_job_http_client_timeout: 3 #seconds
{% endif %}
{% else %}
webhook_job_max_retry: 3
# HTTP client timeout for webhook job
webhook_job_http_client_timeout: 3 #seconds
{% endif %}
# Log configurations
log:
# options are debug, info, warning, error, fatal
{% if log is defined %}
level: {{ log.level }}
# configs for logs in local storage
local:
# Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
rotate_count: {{ log.local.rotate_count }}
# Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
# If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
# are all valid.
rotate_size: {{ log.local.rotate_size }}
# The directory on your host that store log
location: {{ log.local.location }}
{% if log.external_endpoint is defined %}
external_endpoint:
# protocol used to transmit log to external endpoint, options is tcp or udp
protocol: {{ log.external_endpoint.protocol }}
# The host of external endpoint
host: {{ log.external_endpoint.host }}
# Port of external endpoint
port: {{ log.external_endpoint.port }}
{% else %}
# Uncomment following lines to enable external syslog endpoint.
# external_endpoint:
# # protocol used to transmit log to external endpoint, options is tcp or udp
# protocol: tcp
# # The host of external endpoint
# host: localhost
# # Port of external endpoint
# port: 5140
{% endif %}
{% else %}
level: info
# configs for logs in local storage
local:
# Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
rotate_count: 50
# Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
# If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
# are all valid.
rotate_size: 200M
# The directory on your host that store log
location: /var/log/harbor
# Uncomment following lines to enable external syslog endpoint.
# external_endpoint:
# # protocol used to transmit log to external endpoint, options is tcp or udp
# protocol: tcp
# # The host of external endpoint
# host: localhost
# # Port of external endpoint
# port: 5140
{% endif %}
#This attribute is for migrator to detect the version of the .cfg file, DO NOT MODIFY!
_version: 2.10.0
{% if external_database is defined %}
# Uncomment external_database if using external database.
external_database:
harbor:
host: {{ external_database.harbor.host }}
port: {{ external_database.harbor.port }}
db_name: {{ external_database.harbor.db_name }}
username: {{ external_database.harbor.username }}
password: {{ external_database.harbor.password }}
ssl_mode: {{ external_database.harbor.ssl_mode }}
max_idle_conns: {{ external_database.harbor.max_idle_conns}}
max_open_conns: {{ external_database.harbor.max_open_conns}}
{% else %}
# Uncomment external_database if using external database.
# external_database:
# harbor:
# host: harbor_db_host
# port: harbor_db_port
# db_name: harbor_db_name
# username: harbor_db_username
# password: harbor_db_password
# ssl_mode: disable
# max_idle_conns: 2
# max_open_conns: 0
{% endif %}
{% if redis is defined %}
redis:
# # db_index 0 is for core, it's unchangeable
{% if redis.registry_db_index is defined %}
registry_db_index: {{ redis.registry_db_index }}
{% else %}
# # registry_db_index: 1
{% endif %}
{% if redis.jobservice_db_index is defined %}
jobservice_db_index: {{ redis.jobservice_db_index }}
{% else %}
# # jobservice_db_index: 2
{% endif %}
{% if redis.trivy_db_index is defined %}
trivy_db_index: {{ redis.trivy_db_index }}
{% else %}
# # trivy_db_index: 5
{% endif %}
{% if redis.harbor_db_index is defined %}
harbor_db_index: {{ redis.harbor_db_index }}
{% else %}
# # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
# # harbor_db_index: 6
{% endif %}
{% if redis.cache_layer_db_index is defined %}
cache_layer_db_index: {{ redis.cache_layer_db_index }}
{% else %}
# # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
# # cache_layer_db_index: 7
{% endif %}
{% else %}
# Uncomment redis if need to customize redis db
# redis:
# # db_index 0 is for core, it's unchangeable
# # registry_db_index: 1
# # jobservice_db_index: 2
# # trivy_db_index: 5
# # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
# # harbor_db_index: 6
# # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
# # cache_layer_db_index: 7
{% endif %}
{% if external_redis is defined %}
external_redis:
# support redis, redis+sentinel
# host for redis: <host_redis>:<port_redis>
# host for redis+sentinel:
# <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
host: {{ external_redis.host }}
password: {{ external_redis.password }}
# Redis AUTH command was extended in Redis 6, it is possible to use it in the two-arguments AUTH <username> <password> form.
{% if external_redis.username is defined %}
username: {{ external_redis.username }}
{% else %}
# username:
{% endif %}
# sentinel_master_set must be set to support redis+sentinel
#sentinel_master_set:
# db_index 0 is for core, it's unchangeable
registry_db_index: {{ external_redis.registry_db_index }}
jobservice_db_index: {{ external_redis.jobservice_db_index }}
trivy_db_index: 5
idle_timeout_seconds: 30
{% if external_redis.harbor_db_index is defined %}
harbor_db_index: {{ external_redis.harbor_db_index }}
{% else %}
# # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
# # harbor_db_index: 6
{% endif %}
{% if external_redis.cache_layer_db_index is defined %}
cache_layer_db_index: {{ external_redis.cache_layer_db_index }}
{% else %}
# # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
# # cache_layer_db_index: 7
{% endif %}
{% else %}
# Uncomment external_redis if using external Redis server
# external_redis:
# # support redis, redis+sentinel
# # host for redis: <host_redis>:<port_redis>
# # host for redis+sentinel:
# # <host_sentinel1>:<port_sentinel1>,<host_sentinel2>:<port_sentinel2>,<host_sentinel3>:<port_sentinel3>
# host: redis:6379
# password:
# # Redis AUTH command was extended in Redis 6, it is possible to use it in the two-arguments AUTH <username> <password> form.
# # username:
# # sentinel_master_set must be set to support redis+sentinel
# #sentinel_master_set:
# # db_index 0 is for core, it's unchangeable
# registry_db_index: 1
# jobservice_db_index: 2
# trivy_db_index: 5
# idle_timeout_seconds: 30
# # it's optional, the db for harbor business misc, by default is 0, uncomment it if you want to change it.
# # harbor_db_index: 6
# # it's optional, the db for harbor cache layer, by default is 0, uncomment it if you want to change it.
# # cache_layer_db_index: 7
{% endif %}
{% if uaa is defined %}
# Uncomment uaa for trusting the certificate of uaa instance that is hosted via self-signed cert.
uaa:
ca_file: {{ uaa.ca_file }}
{% else %}
# Uncomment uaa for trusting the certificate of uaa instance that is hosted via self-signed cert.
# uaa:
# ca_file: /path/to/ca
{% endif %}
# Global proxy
# Config http proxy for components, e.g. http://my.proxy.com:3128
# Components don't need to connect to each other via http proxy.
# Remove a component from the `components` array if you want to disable the
# proxy for it. If you want to use the proxy for replication, you MUST enable
# the proxy for core and jobservice, and set `http_proxy` and `https_proxy`.
# Add a domain to the `no_proxy` field when you want to disable the proxy
# for some special registry.
{% if proxy is defined %}
proxy:
http_proxy: {{ proxy.http_proxy or ''}}
https_proxy: {{ proxy.https_proxy or ''}}
no_proxy: {{ proxy.no_proxy or ''}}
{% if proxy.components is defined %}
components:
{% for component in proxy.components %}
{% if component != 'clair' %}
- {{component}}
{% endif %}
{% endfor %}
{% endif %}
{% else %}
proxy:
http_proxy:
https_proxy:
no_proxy:
components:
- core
- jobservice
- trivy
{% endif %}
{% if metric is defined %}
metric:
enabled: {{ metric.enabled }}
port: {{ metric.port }}
path: {{ metric.path }}
{% else %}
# metric:
# enabled: false
# port: 9090
# path: /metric
{% endif %}
# Trace related config
# only can enable one trace provider(jaeger or otel) at the same time,
# and when using jaeger as provider, can only enable it with agent mode or collector mode.
# if using jaeger collector mode, uncomment endpoint and uncomment username, password if needed
# if using jaeger agent mode, uncomment agent_host and agent_port
{% if trace is defined %}
trace:
enabled: {{ trace.enabled | lower}}
sample_rate: {{ trace.sample_rate }}
# # namespace used to differentiate different harbor services
{% if trace.namespace is defined %}
namespace: {{ trace.namespace }}
{% else %}
# namespace:
{% endif %}
# # attributes is a key value dict contains user defined attributes used to initialize trace provider
{% if trace.attributes is defined%}
attributes:
{% for name, value in trace.attributes.items() %}
{{name}}: {{value}}
{% endfor %}
{% else %}
# attributes:
# application: harbor
{% endif %}
{% if trace.jaeger is defined%}
jaeger:
endpoint: {{trace.jaeger.endpoint or '' }}
username: {{trace.jaeger.username or ''}}
password: {{trace.jaeger.password or ''}}
agent_host: {{trace.jaeger.agent_host or ''}}
agent_port: {{trace.jaeger.agent_port or ''}}
{% else %}
# jaeger:
# endpoint:
# username:
# password:
# agent_host:
# agent_port:
{% endif %}
{% if trace.otel is defined %}
  otel:
    endpoint: {{trace.otel.endpoint or '' }}
    url_path: {{trace.otel.url_path or '' }}
    compression: {{trace.otel.compression | lower }}
    insecure: {{trace.otel.insecure | lower }}
    timeout: {{trace.otel.timeout or '' }}
{% else %}
#   otel:
#     endpoint: hostname:4318
#     url_path: /v1/traces
#     compression: false
#     insecure: true
#     # timeout is in seconds
#     timeout: 10
{% endif %}
{% else %}
# trace:
# enabled: true
# # set sample_rate to 1 if you wanna sampling 100% of trace data; set 0.5 if you wanna sampling 50% of trace data, and so forth
# sample_rate: 1
# # # namespace used to differentiate different harbor services
# # namespace:
# # # attributes is a key value dict contains user defined attributes used to initialize trace provider
# # attributes:
# # application: harbor
# # jaeger:
# # endpoint: http://hostname:14268/api/traces
# # username:
# # password:
# # agent_host: hostname
# # agent_port: 6832
# # otel:
# # endpoint: hostname:4318
# # url_path: /v1/traces
# # compression: false
# # insecure: true
# # # timeout is in seconds
# # timeout: 10
{% endif %}
# enable purge _upload directories
{% if upload_purging is defined %}
upload_purging:
enabled: {{ upload_purging.enabled | lower}}
age: {{ upload_purging.age }}
interval: {{ upload_purging.interval }}
dryrun: {{ upload_purging.dryrun | lower}}
{% else %}
upload_purging:
enabled: true
# remove files in _upload directories which exist for a period of time, default is one week.
age: 168h
# the interval of the purge operations
interval: 24h
dryrun: false
{% endif %}
# Cache related config
{% if cache is defined %}
cache:
enabled: {{ cache.enabled | lower}}
expire_hours: {{ cache.expire_hours }}
{% else %}
cache:
enabled: false
expire_hours: 24
{% endif %}
# Harbor core configurations
{% if core is defined %}
core:
# The provider for updating project quota(usage), there are 2 options, redis or db,
# by default is implemented by db but you can switch the updation via redis which
# can improve the performance of high concurrent pushing to the same project,
# and reduce the database connections spike and occupies.
# Using redis introduces some delay in displaying quota usage updates, so only
# switch the provider to redis if you have run into database connection spikes around
# the scenario of highly concurrent pushes to the same project; there is no improvement for other scenarios.
quota_update_provider: {{ core.quota_update_provider }}
{% else %}
# core:
# # The provider for updating project quota(usage), there are 2 options, redis or db,
# # by default is implemented by db but you can switch the updation via redis which
# # can improve the performance of high concurrent pushing to the same project,
# # and reduce the database connections spike and occupies.
# # Using redis introduces some delay in displaying quota usage updates, so only
# # switch the provider to redis if you have run into database connection spikes around
# # the scenario of highly concurrent pushes to the same project; there is no improvement for other scenarios.
# quota_update_provider: redis # Or db
{% endif %}

View File

@ -50,7 +50,12 @@ http {
include /etc/nginx/conf.d/*.server.conf; include /etc/nginx/conf.d/*.server.conf;
server { server {
{% if ip_family.ipv4.enabled %}
listen 8443 ssl; listen 8443 ssl;
{% endif %}
{% if ip_family.ipv6.enabled %}
listen [::]:8443 ssl;
{% endif %}
# server_name harbordomain.com; # server_name harbordomain.com;
server_tokens off; server_tokens off;
# SSL # SSL
@ -59,7 +64,7 @@ http {
# Recommendations from https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html # Recommendations from https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html
ssl_protocols TLSv1.2 TLSv1.3; ssl_protocols TLSv1.2 TLSv1.3;
{% if internal_tls.strong_ssl_ciphers %} {% if strong_ssl_ciphers %}
ssl_ciphers ECDHE+AESGCM:DHE+AESGCM:ECDHE+RSA+SHA256:DHE+RSA+SHA256:!AES128; ssl_ciphers ECDHE+AESGCM:DHE+AESGCM:ECDHE+RSA+SHA256:DHE+RSA+SHA256:!AES128;
{% else %} {% else %}
ssl_ciphers '!aNULL:kECDH+AESGCM:ECDH+AESGCM:RSA+AESGCM:kECDH+AES:ECDH+AES:RSA+AES:'; ssl_ciphers '!aNULL:kECDH+AESGCM:ECDH+AESGCM:RSA+AESGCM:kECDH+AES:ECDH+AES:RSA+AES:';

View File

@ -16,13 +16,19 @@ http {
server { server {
{% if internal_tls.enabled %} {% if internal_tls.enabled %}
#ip_family
{% if ip_family.ipv4.enabled %}
listen 8443 ssl; listen 8443 ssl;
{% endif %}
{% if ip_family.ipv6.enabled %}
listen [::]:8443 ssl;
{% endif %}
# SSL # SSL
ssl_certificate /etc/harbor/tls/portal.crt; ssl_certificate /etc/harbor/tls/portal.crt;
ssl_certificate_key /etc/harbor/tls/portal.key; ssl_certificate_key /etc/harbor/tls/portal.key;
ssl_protocols TLSv1.2 TLSv1.3; ssl_protocols TLSv1.2 TLSv1.3;
{% if internal_tls.strong_ssl_ciphers %} {% if strong_ssl_ciphers %}
ssl_ciphers ECDHE+AESGCM:DHE+AESGCM:ECDHE+RSA+SHA256:DHE+RSA+SHA256:!AES128; ssl_ciphers ECDHE+AESGCM:DHE+AESGCM:ECDHE+RSA+SHA256:DHE+RSA+SHA256:!AES128;
{% else %} {% else %}
ssl_ciphers '!aNULL:kECDH+AESGCM:ECDH+AESGCM:RSA+AESGCM:kECDH+AES:ECDH+AES:RSA+AES:'; ssl_ciphers '!aNULL:kECDH+AESGCM:ECDH+AESGCM:RSA+AESGCM:kECDH+AES:ECDH+AES:RSA+AES:';

View File

@ -10,6 +10,7 @@ SCANNER_TRIVY_VULN_TYPE=os,library
SCANNER_TRIVY_SEVERITY=UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL SCANNER_TRIVY_SEVERITY=UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL
SCANNER_TRIVY_IGNORE_UNFIXED={{trivy_ignore_unfixed}} SCANNER_TRIVY_IGNORE_UNFIXED={{trivy_ignore_unfixed}}
SCANNER_TRIVY_SKIP_UPDATE={{trivy_skip_update}} SCANNER_TRIVY_SKIP_UPDATE={{trivy_skip_update}}
SCANNER_TRIVY_SKIP_JAVA_DB_UPDATE={{trivy_skip_java_db_update}}
SCANNER_TRIVY_OFFLINE_SCAN={{trivy_offline_scan}} SCANNER_TRIVY_OFFLINE_SCAN={{trivy_offline_scan}}
SCANNER_TRIVY_SECURITY_CHECKS={{trivy_security_check}} SCANNER_TRIVY_SECURITY_CHECKS={{trivy_security_check}}
SCANNER_TRIVY_GITHUB_TOKEN={{trivy_github_token}} SCANNER_TRIVY_GITHUB_TOKEN={{trivy_github_token}}

View File

@ -212,6 +212,7 @@ def parse_yaml_config(config_file_path, with_trivy):
trivy_configs = configs.get("trivy") or {} trivy_configs = configs.get("trivy") or {}
config_dict['trivy_github_token'] = trivy_configs.get("github_token") or '' config_dict['trivy_github_token'] = trivy_configs.get("github_token") or ''
config_dict['trivy_skip_update'] = trivy_configs.get("skip_update") or False config_dict['trivy_skip_update'] = trivy_configs.get("skip_update") or False
config_dict['trivy_skip_java_db_update'] = trivy_configs.get("skip_java_db_update") or False
config_dict['trivy_offline_scan'] = trivy_configs.get("offline_scan") or False config_dict['trivy_offline_scan'] = trivy_configs.get("offline_scan") or False
config_dict['trivy_security_check'] = trivy_configs.get("security_check") or 'vuln' config_dict['trivy_security_check'] = trivy_configs.get("security_check") or 'vuln'
config_dict['trivy_ignore_unfixed'] = trivy_configs.get("ignore_unfixed") or False config_dict['trivy_ignore_unfixed'] = trivy_configs.get("ignore_unfixed") or False
@ -298,6 +299,20 @@ def parse_yaml_config(config_file_path, with_trivy):
external_database=config_dict['external_database']) external_database=config_dict['external_database'])
else: else:
config_dict['internal_tls'] = InternalTLS() config_dict['internal_tls'] = InternalTLS()
# the configure item apply to internal and external tls communication
# for compatibility, user could configure the strong_ssl_ciphers either in https section or under internal_tls section,
# but it is more reasonable to configure it in https_config
if https_config:
config_dict['strong_ssl_ciphers'] = https_config.get('strong_ssl_ciphers')
else:
config_dict['strong_ssl_ciphers'] = False
if internal_tls_config:
config_dict['strong_ssl_ciphers'] = config_dict['strong_ssl_ciphers'] or internal_tls_config.get('strong_ssl_ciphers')
# ip_family config
config_dict['ip_family'] = configs.get('ip_family') or {'ipv4': {'enabled': True}, 'ipv6': {'enabled': False}}
# metric configs # metric configs
metric_config = configs.get('metric') metric_config = configs.get('metric')

View File

@ -27,6 +27,12 @@ def read_conf(path):
with open(path) as f: with open(path) as f:
try: try:
d = yaml.safe_load(f) d = yaml.safe_load(f)
# the strong_ssl_ciphers configure item apply to internal and external tls communication
# for compatibility, user could configure the strong_ssl_ciphers either in https section or under internal_tls section,
# but it will move to https section after migration
https_config = d.get("https") or {}
internal_tls = d.get('internal_tls') or {}
d['strong_ssl_ciphers'] = https_config.get('strong_ssl_ciphers') or internal_tls.get('strong_ssl_ciphers')
except Exception as e: except Exception as e:
click.echo("parse config file err, make sure your harbor config version is above 1.8.0", e) click.echo("parse config file err, make sure your harbor config version is above 1.8.0", e)
exit(-1) exit(-1)

View File

@ -63,7 +63,9 @@ def render_nginx_template(config_dict):
ssl_cert=SSL_CERT_PATH, ssl_cert=SSL_CERT_PATH,
ssl_cert_key=SSL_CERT_KEY_PATH, ssl_cert_key=SSL_CERT_KEY_PATH,
internal_tls=config_dict['internal_tls'], internal_tls=config_dict['internal_tls'],
metric=config_dict['metric']) metric=config_dict['metric'],
strong_ssl_ciphers=config_dict['strong_ssl_ciphers'],
ip_family=config_dict['ip_family'])
location_file_pattern = CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTPS location_file_pattern = CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTPS
else: else:

View File

@ -14,5 +14,8 @@ def prepare_portal(config_dict):
str(portal_conf_template_path), str(portal_conf_template_path),
portal_conf, portal_conf,
internal_tls=config_dict['internal_tls'], internal_tls=config_dict['internal_tls'],
ip_family=config_dict['ip_family'],
uid=DEFAULT_UID, uid=DEFAULT_UID,
gid=DEFAULT_GID) gid=DEFAULT_GID,
strong_ssl_ciphers=config_dict['strong_ssl_ciphers']
)

View File

@ -1,4 +1,4 @@
FROM golang:1.20.10 FROM golang:1.21.5
ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution
ENV BUILDTAGS include_oss include_gcs ENV BUILDTAGS include_oss include_gcs

View File

@ -1,19 +1,19 @@
diff --git a/configuration/configuration.go b/configuration/configuration.go diff --git a/configuration/configuration.go b/configuration/configuration.go
index dd315485..a3e0818e 100644 index 7076df85d4..3e74330321 100644
--- a/configuration/configuration.go --- a/configuration/configuration.go
+++ b/configuration/configuration.go +++ b/configuration/configuration.go
@@ -168,6 +168,9 @@ type Configuration struct { @@ -168,6 +168,9 @@ type Configuration struct {
// Addr specifies the the redis instance available to the application. // Addr specifies the the redis instance available to the application.
Addr string `yaml:"addr,omitempty"` Addr string `yaml:"addr,omitempty"`
+ // SentinelMasterSet specifies the the redis sentinel master set name. + // SentinelMasterSet specifies the the redis sentinel master set name.
+ SentinelMasterSet string `yaml:"sentinelMasterSet,omitempty"` + SentinelMasterSet string `yaml:"sentinelMasterSet,omitempty"`
+ +
// Password string to use when making a connection. // Password string to use when making a connection.
Password string `yaml:"password,omitempty"` Password string `yaml:"password,omitempty"`
diff --git a/registry/handlers/app.go b/registry/handlers/app.go diff --git a/registry/handlers/app.go b/registry/handlers/app.go
index 8a30bd4d..4e9cec34 100644 index bf56cea22a..4a7cee9a2e 100644
--- a/registry/handlers/app.go --- a/registry/handlers/app.go
+++ b/registry/handlers/app.go +++ b/registry/handlers/app.go
@@ -3,6 +3,7 @@ package handlers @@ -3,6 +3,7 @@ package handlers
@ -24,19 +24,18 @@ index 8a30bd4d..4e9cec34 100644
"expvar" "expvar"
"fmt" "fmt"
"math" "math"
@@ -16,6 +17,8 @@ import ( @@ -16,6 +17,7 @@ import (
"strings" "strings"
"time" "time"
+ "github.com/FZambia/sentinel" + "github.com/FZambia/sentinel"
+ "github.com/distribution/reference"
"github.com/docker/distribution" "github.com/docker/distribution"
"github.com/docker/distribution/configuration" "github.com/docker/distribution/configuration"
dcontext "github.com/docker/distribution/context" @@ -499,6 +501,45 @@ func (app *App) configureRedis(configuration *configuration.Configuration) {
@@ -499,6 +502,44 @@ func (app *App) configureRedis(configuration *configuration.Configuration) {
return return
} }
+ var getRedisAddr func() (string, error) + var getRedisAddr func() (string, error)
+ var testOnBorrow func(c redis.Conn, t time.Time) error + var testOnBorrow func(c redis.Conn, t time.Time) error
+ if configuration.Redis.SentinelMasterSet != "" { + if configuration.Redis.SentinelMasterSet != "" {
@ -75,13 +74,14 @@ index 8a30bd4d..4e9cec34 100644
+ return err + return err
+ } + }
+ } + }
+
pool := &redis.Pool{ pool := &redis.Pool{
Dial: func() (redis.Conn, error) { Dial: func() (redis.Conn, error) {
// TODO(stevvooe): Yet another use case for contextual timing. // TODO(stevvooe): Yet another use case for contextual timing.
@@ -514,8 +555,11 @@ func (app *App) configureRedis(configuration *configuration.Configuration) { @@ -514,8 +555,11 @@ func (app *App) configureRedis(configuration *configuration.Configuration) {
} }
} }
- conn, err := redis.DialTimeout("tcp", - conn, err := redis.DialTimeout("tcp",
- configuration.Redis.Addr, - configuration.Redis.Addr,
+ redisAddr, err := getRedisAddr() + redisAddr, err := getRedisAddr()
@ -112,10 +112,10 @@ index 8a30bd4d..4e9cec34 100644
+ TestOnBorrow: testOnBorrow, + TestOnBorrow: testOnBorrow,
+ Wait: false, // if a connection is not available, proceed without cache. + Wait: false, // if a connection is not available, proceed without cache.
} }
app.redis = pool app.redis = pool
diff --git a/registry/handlers/app_test.go b/registry/handlers/app_test.go diff --git a/registry/handlers/app_test.go b/registry/handlers/app_test.go
index 60a57e6c..8a644d83 100644 index 60a57e6c15..8a644d83d8 100644
--- a/registry/handlers/app_test.go --- a/registry/handlers/app_test.go
+++ b/registry/handlers/app_test.go +++ b/registry/handlers/app_test.go
@@ -140,7 +140,29 @@ func TestAppDispatcher(t *testing.T) { @@ -140,7 +140,29 @@ func TestAppDispatcher(t *testing.T) {
@ -157,7 +157,7 @@ index 60a57e6c..8a644d83 100644
+ config.Redis.DB = 0 + config.Redis.DB = 0
+ runAppWithConfig(t, config) + runAppWithConfig(t, config)
+} +}
+// TestNewApp covers the creation of an application via NewApp with a +// TestNewApp covers the creation of an application via NewApp with a
+// configuration(with redis sentinel cluster). +// configuration(with redis sentinel cluster).
+func TestNewAppWithRedisSentinelCluster(t *testing.T) { +func TestNewAppWithRedisSentinelCluster(t *testing.T) {
@ -189,18 +189,17 @@ index 60a57e6c..8a644d83 100644
// ensuring that NewApp doesn't panic. We might want to tweak this // ensuring that NewApp doesn't panic. We might want to tweak this
// behavior. // behavior.
diff --git a/vendor.conf b/vendor.conf diff --git a/vendor.conf b/vendor.conf
index bd1b4bff..a45ac137 100644 index 33fe616b76..a8d8f58bc6 100644
--- a/vendor.conf --- a/vendor.conf
+++ b/vendor.conf +++ b/vendor.conf
@@ -49,3 +49,4 @@ gopkg.in/yaml.v2 v2.2.1 @@ -51,3 +51,4 @@ gopkg.in/yaml.v2 v2.2.1
rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git
github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb github.com/opencontainers/go-digest ea51bea511f75cfa3ef6098cc253c5c3609b037a # v1.0.0
github.com/opencontainers/image-spec 67d2d5658fe0476ab9bf414cec164077ebff3920 # v1.0.2 github.com/opencontainers/image-spec 67d2d5658fe0476ab9bf414cec164077ebff3920 # v1.0.2
+github.com/FZambia/sentinel 5585739eb4b6478aa30161866ccf9ce0ef5847c7 https://github.com/jeremyxu2010/sentinel.git +github.com/FZambia/sentinel 5585739eb4b6478aa30161866ccf9ce0ef5847c7 https://github.com/jeremyxu2010/sentinel.git
\ No newline at end of file
diff --git a/vendor/github.com/FZambia/sentinel/LICENSE b/vendor/github.com/FZambia/sentinel/LICENSE diff --git a/vendor/github.com/FZambia/sentinel/LICENSE b/vendor/github.com/FZambia/sentinel/LICENSE
new file mode 100644 new file mode 100644
index 00000000..9c8f3ea0 index 0000000000..8dada3edaf
--- /dev/null --- /dev/null
+++ b/vendor/github.com/FZambia/sentinel/LICENSE +++ b/vendor/github.com/FZambia/sentinel/LICENSE
@@ -0,0 +1,201 @@ @@ -0,0 +1,201 @@
@ -405,10 +404,9 @@ index 00000000..9c8f3ea0
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and + See the License for the specific language governing permissions and
+ limitations under the License. + limitations under the License.
\ No newline at end of file
diff --git a/vendor/github.com/FZambia/sentinel/README.md b/vendor/github.com/FZambia/sentinel/README.md diff --git a/vendor/github.com/FZambia/sentinel/README.md b/vendor/github.com/FZambia/sentinel/README.md
new file mode 100644 new file mode 100644
index 00000000..fc810435 index 0000000000..f544c54ef6
--- /dev/null --- /dev/null
+++ b/vendor/github.com/FZambia/sentinel/README.md +++ b/vendor/github.com/FZambia/sentinel/README.md
@@ -0,0 +1,39 @@ @@ -0,0 +1,39 @@
@ -450,11 +448,10 @@ index 00000000..fc810435
+License +License
+------- +-------
+ +
+Library is available under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.html). +Library is available under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.html).
\ No newline at end of file
diff --git a/vendor/github.com/FZambia/sentinel/sentinel.go b/vendor/github.com/FZambia/sentinel/sentinel.go diff --git a/vendor/github.com/FZambia/sentinel/sentinel.go b/vendor/github.com/FZambia/sentinel/sentinel.go
new file mode 100644 new file mode 100644
index 00000000..98dea26d index 0000000000..79209e9f0d
--- /dev/null --- /dev/null
+++ b/vendor/github.com/FZambia/sentinel/sentinel.go +++ b/vendor/github.com/FZambia/sentinel/sentinel.go
@@ -0,0 +1,426 @@ @@ -0,0 +1,426 @@

View File

@ -1,4 +1,4 @@
FROM golang:1.20.10 FROM golang:1.21.5
ADD . /go/src/github.com/aquasecurity/harbor-scanner-trivy/ ADD . /go/src/github.com/aquasecurity/harbor-scanner-trivy/
WORKDIR /go/src/github.com/aquasecurity/harbor-scanner-trivy/ WORKDIR /go/src/github.com/aquasecurity/harbor-scanner-trivy/

View File

@ -19,7 +19,7 @@ TEMP=$(mktemp -d ${TMPDIR-/tmp}/trivy-adapter.XXXXXX)
git clone https://github.com/aquasecurity/harbor-scanner-trivy.git $TEMP git clone https://github.com/aquasecurity/harbor-scanner-trivy.git $TEMP
cd $TEMP; git checkout $VERSION; cd - cd $TEMP; git checkout $VERSION; cd -
echo "Building Trivy adapter binary based on golang:1.20.10..." echo "Building Trivy adapter binary based on golang:1.21.5..."
cp Dockerfile.binary $TEMP cp Dockerfile.binary $TEMP
docker build -f $TEMP/Dockerfile.binary -t trivy-adapter-golang $TEMP docker build -f $TEMP/Dockerfile.binary -t trivy-adapter-golang $TEMP

View File

@ -14,6 +14,8 @@
package common package common
import "time"
type contextKey string type contextKey string
// const variables // const variables
@ -184,6 +186,7 @@ const (
TraceOtelTimeout = "trace_otel_timeout" TraceOtelTimeout = "trace_otel_timeout"
GDPRDeleteUser = "gdpr_delete_user" GDPRDeleteUser = "gdpr_delete_user"
GDPRAuditLogs = "gdpr_audit_logs"
// These variables are temporary solution for issue: https://github.com/goharbor/harbor/issues/16039 // These variables are temporary solution for issue: https://github.com/goharbor/harbor/issues/16039
// When user disable the pull count/time/audit log, it will decrease the database access, especially in large concurrency pull scenarios. // When user disable the pull count/time/audit log, it will decrease the database access, especially in large concurrency pull scenarios.
@ -228,4 +231,19 @@ const (
ExecutionStatusRefreshIntervalSeconds = "execution_status_refresh_interval_seconds" ExecutionStatusRefreshIntervalSeconds = "execution_status_refresh_interval_seconds"
// QuotaUpdateProvider is the provider for updating quota, currently support Redis and DB // QuotaUpdateProvider is the provider for updating quota, currently support Redis and DB
QuotaUpdateProvider = "quota_update_provider" QuotaUpdateProvider = "quota_update_provider"
// IllegalCharsInUsername is the illegal chars in username
IllegalCharsInUsername = `,"~#%$`
// Beego web config
// BeegoMaxMemoryBytes is the max memory(bytes) of the beego web config
BeegoMaxMemoryBytes = "beego_max_memory_bytes"
// DefaultBeegoMaxMemoryBytes sets default max memory to 128GB
DefaultBeegoMaxMemoryBytes = 1 << 37
// BeegoMaxUploadSizeBytes is the max upload size(bytes) of the beego web config
BeegoMaxUploadSizeBytes = "beego_max_upload_size_bytes"
// DefaultBeegoMaxUploadSizeBytes sets default max upload size to 128GB
DefaultBeegoMaxUploadSizeBytes = 1 << 37
// Global Leeway used for token validation
JwtLeeway = 60 * time.Second
) )

View File

@ -14,6 +14,8 @@
package rbac package rbac
import "github.com/goharbor/harbor/src/pkg/permission/types"
// const action variables // const action variables
const ( const (
ActionAll = Action("*") // action match any other actions ActionAll = Action("*") // action match any other actions
@ -77,3 +79,153 @@ const (
ResourceJobServiceMonitor = Resource("jobservice-monitor") ResourceJobServiceMonitor = Resource("jobservice-monitor")
ResourceSecurityHub = Resource("security-hub") ResourceSecurityHub = Resource("security-hub")
) )
var (
PoliciesMap = map[string][]*types.Policy{
"System": {
{Resource: ResourceAuditLog, Action: ActionList},
{Resource: ResourcePreatInstance, Action: ActionRead},
{Resource: ResourcePreatInstance, Action: ActionCreate},
{Resource: ResourcePreatInstance, Action: ActionDelete},
{Resource: ResourcePreatInstance, Action: ActionList},
{Resource: ResourcePreatInstance, Action: ActionUpdate},
{Resource: ResourceProject, Action: ActionList},
{Resource: ResourceProject, Action: ActionCreate},
{Resource: ResourceReplicationPolicy, Action: ActionRead},
{Resource: ResourceReplicationPolicy, Action: ActionCreate},
{Resource: ResourceReplicationPolicy, Action: ActionDelete},
{Resource: ResourceReplicationPolicy, Action: ActionList},
{Resource: ResourceReplicationPolicy, Action: ActionUpdate},
{Resource: ResourceReplication, Action: ActionRead},
{Resource: ResourceReplication, Action: ActionCreate},
{Resource: ResourceReplication, Action: ActionList},
{Resource: ResourceReplicationAdapter, Action: ActionList},
{Resource: ResourceRegistry, Action: ActionRead},
{Resource: ResourceRegistry, Action: ActionCreate},
{Resource: ResourceRegistry, Action: ActionDelete},
{Resource: ResourceRegistry, Action: ActionList},
{Resource: ResourceRegistry, Action: ActionUpdate},
{Resource: ResourceScanAll, Action: ActionRead},
{Resource: ResourceScanAll, Action: ActionUpdate},
{Resource: ResourceScanAll, Action: ActionStop},
{Resource: ResourceScanAll, Action: ActionCreate},
{Resource: ResourceSystemVolumes, Action: ActionRead},
{Resource: ResourceGarbageCollection, Action: ActionRead},
{Resource: ResourceGarbageCollection, Action: ActionCreate},
{Resource: ResourceGarbageCollection, Action: ActionList},
{Resource: ResourceGarbageCollection, Action: ActionUpdate},
{Resource: ResourceGarbageCollection, Action: ActionStop},
{Resource: ResourcePurgeAuditLog, Action: ActionRead},
{Resource: ResourcePurgeAuditLog, Action: ActionCreate},
{Resource: ResourcePurgeAuditLog, Action: ActionList},
{Resource: ResourcePurgeAuditLog, Action: ActionUpdate},
{Resource: ResourcePurgeAuditLog, Action: ActionStop},
{Resource: ResourceJobServiceMonitor, Action: ActionList},
{Resource: ResourceJobServiceMonitor, Action: ActionStop},
{Resource: ResourceScanner, Action: ActionRead},
{Resource: ResourceScanner, Action: ActionCreate},
{Resource: ResourceScanner, Action: ActionDelete},
{Resource: ResourceScanner, Action: ActionList},
{Resource: ResourceScanner, Action: ActionUpdate},
{Resource: ResourceLabel, Action: ActionRead},
{Resource: ResourceLabel, Action: ActionCreate},
{Resource: ResourceLabel, Action: ActionDelete},
{Resource: ResourceLabel, Action: ActionUpdate},
{Resource: ResourceSecurityHub, Action: ActionRead},
{Resource: ResourceSecurityHub, Action: ActionList},
{Resource: ResourceCatalog, Action: ActionRead},
{Resource: ResourceQuota, Action: ActionRead},
{Resource: ResourceQuota, Action: ActionList},
},
"Project": {
{Resource: ResourceLog, Action: ActionList},
{Resource: ResourceProject, Action: ActionRead},
{Resource: ResourceProject, Action: ActionDelete},
{Resource: ResourceProject, Action: ActionUpdate},
{Resource: ResourceMetadata, Action: ActionRead},
{Resource: ResourceMetadata, Action: ActionCreate},
{Resource: ResourceMetadata, Action: ActionDelete},
{Resource: ResourceMetadata, Action: ActionList},
{Resource: ResourceMetadata, Action: ActionUpdate},
{Resource: ResourceRepository, Action: ActionRead},
{Resource: ResourceRepository, Action: ActionUpdate},
{Resource: ResourceRepository, Action: ActionDelete},
{Resource: ResourceRepository, Action: ActionList},
{Resource: ResourceRepository, Action: ActionPull},
{Resource: ResourceRepository, Action: ActionPush},
{Resource: ResourceArtifact, Action: ActionRead},
{Resource: ResourceArtifact, Action: ActionCreate},
{Resource: ResourceArtifact, Action: ActionList},
{Resource: ResourceArtifact, Action: ActionDelete},
{Resource: ResourceScan, Action: ActionCreate},
{Resource: ResourceScan, Action: ActionRead},
{Resource: ResourceScan, Action: ActionStop},
{Resource: ResourceTag, Action: ActionCreate},
{Resource: ResourceTag, Action: ActionList},
{Resource: ResourceTag, Action: ActionDelete},
{Resource: ResourceAccessory, Action: ActionList},
{Resource: ResourceArtifactAddition, Action: ActionRead},
{Resource: ResourceArtifactLabel, Action: ActionCreate},
{Resource: ResourceArtifactLabel, Action: ActionDelete},
{Resource: ResourceScanner, Action: ActionCreate},
{Resource: ResourceScanner, Action: ActionRead},
{Resource: ResourcePreatPolicy, Action: ActionRead},
{Resource: ResourcePreatPolicy, Action: ActionCreate},
{Resource: ResourcePreatPolicy, Action: ActionDelete},
{Resource: ResourcePreatPolicy, Action: ActionList},
{Resource: ResourcePreatPolicy, Action: ActionUpdate},
{Resource: ResourceImmutableTag, Action: ActionCreate},
{Resource: ResourceImmutableTag, Action: ActionDelete},
{Resource: ResourceImmutableTag, Action: ActionList},
{Resource: ResourceImmutableTag, Action: ActionUpdate},
{Resource: ResourceNotificationPolicy, Action: ActionRead},
{Resource: ResourceNotificationPolicy, Action: ActionCreate},
{Resource: ResourceNotificationPolicy, Action: ActionDelete},
{Resource: ResourceNotificationPolicy, Action: ActionList},
{Resource: ResourceNotificationPolicy, Action: ActionUpdate},
{Resource: ResourceTagRetention, Action: ActionRead},
{Resource: ResourceTagRetention, Action: ActionCreate},
{Resource: ResourceTagRetention, Action: ActionDelete},
{Resource: ResourceTagRetention, Action: ActionList},
{Resource: ResourceTagRetention, Action: ActionUpdate},
{Resource: ResourceLabel, Action: ActionRead},
{Resource: ResourceLabel, Action: ActionCreate},
{Resource: ResourceLabel, Action: ActionDelete},
{Resource: ResourceLabel, Action: ActionList},
{Resource: ResourceLabel, Action: ActionUpdate},
{Resource: ResourceQuota, Action: ActionRead},
},
}
)

View File

@ -284,6 +284,7 @@ var (
{Resource: rbac.ResourceQuota, Action: rbac.ActionRead}, {Resource: rbac.ResourceQuota, Action: rbac.ActionRead},
{Resource: rbac.ResourceRepository, Action: rbac.ActionList}, {Resource: rbac.ResourceRepository, Action: rbac.ActionList},
{Resource: rbac.ResourceRepository, Action: rbac.ActionRead},
{Resource: rbac.ResourceRepository, Action: rbac.ActionPull}, {Resource: rbac.ResourceRepository, Action: rbac.ActionPull},
{Resource: rbac.ResourceConfiguration, Action: rbac.ActionRead}, {Resource: rbac.ResourceConfiguration, Action: rbac.ActionRead},

View File

@ -111,7 +111,8 @@ func (s *SecurityContext) Can(ctx context.Context, action types.Action, resource
} }
if len(sysPolicies) != 0 { if len(sysPolicies) != 0 {
evaluators = evaluators.Add(system.NewEvaluator(s.GetUsername(), sysPolicies)) evaluators = evaluators.Add(system.NewEvaluator(s.GetUsername(), sysPolicies))
} else if len(proPolicies) != 0 { }
if len(proPolicies) != 0 {
evaluators = evaluators.Add(rbac_project.NewEvaluator(s.ctl, rbac_project.NewBuilderForPolicies(s.GetUsername(), proPolicies))) evaluators = evaluators.Add(rbac_project.NewEvaluator(s.ctl, rbac_project.NewBuilderForPolicies(s.GetUsername(), proPolicies)))
} }
s.evaluator = evaluators s.evaluator = evaluators
@ -119,7 +120,6 @@ func (s *SecurityContext) Can(ctx context.Context, action types.Action, resource
s.evaluator = rbac_project.NewEvaluator(s.ctl, rbac_project.NewBuilderForPolicies(s.GetUsername(), accesses, filterRobotPolicies)) s.evaluator = rbac_project.NewEvaluator(s.ctl, rbac_project.NewBuilderForPolicies(s.GetUsername(), accesses, filterRobotPolicies))
} }
}) })
return s.evaluator != nil && s.evaluator.HasPermission(ctx, resource, action) return s.evaluator != nil && s.evaluator.HasPermission(ctx, resource, action)
} }

View File

@ -24,6 +24,7 @@ import (
"github.com/goharbor/harbor/src/common/rbac" "github.com/goharbor/harbor/src/common/rbac"
"github.com/goharbor/harbor/src/common/rbac/project" "github.com/goharbor/harbor/src/common/rbac/project"
"github.com/goharbor/harbor/src/common/rbac/system"
"github.com/goharbor/harbor/src/controller/robot" "github.com/goharbor/harbor/src/controller/robot"
"github.com/goharbor/harbor/src/pkg/permission/types" "github.com/goharbor/harbor/src/pkg/permission/types"
proModels "github.com/goharbor/harbor/src/pkg/project/models" proModels "github.com/goharbor/harbor/src/pkg/project/models"
@ -198,6 +199,57 @@ func TestHasPushPullPerm(t *testing.T) {
assert.True(t, ctx.Can(context.TODO(), rbac.ActionPush, resource) && ctx.Can(context.TODO(), rbac.ActionPull, resource)) assert.True(t, ctx.Can(context.TODO(), rbac.ActionPush, resource) && ctx.Can(context.TODO(), rbac.ActionPull, resource))
} }
func TestSysAndProPerm(t *testing.T) {
robot := &robot.Robot{
Level: "system",
Robot: model.Robot{
Name: "test_robot_4",
Description: "desc",
},
Permissions: []*robot.Permission{
{
Kind: "system",
Namespace: "/",
Access: []*types.Policy{
{
Resource: rbac.Resource(fmt.Sprintf("system/%s", rbac.ResourceRepository)),
Action: rbac.ActionList,
},
{
Resource: rbac.Resource(fmt.Sprintf("system/%s", rbac.ResourceGarbageCollection)),
Action: rbac.ActionCreate,
},
},
},
{
Kind: "project",
Namespace: "library",
Access: []*types.Policy{
{
Resource: rbac.Resource(fmt.Sprintf("project/%d/repository", private.ProjectID)),
Action: rbac.ActionPush,
},
{
Resource: rbac.Resource(fmt.Sprintf("project/%d/repository", private.ProjectID)),
Action: rbac.ActionPull,
},
},
},
},
}
ctl := &projecttesting.Controller{}
mock.OnAnything(ctl, "Get").Return(private, nil)
ctx := NewSecurityContext(robot)
ctx.ctl = ctl
resource := project.NewNamespace(private.ProjectID).Resource(rbac.ResourceRepository)
assert.True(t, ctx.Can(context.TODO(), rbac.ActionPush, resource) && ctx.Can(context.TODO(), rbac.ActionPull, resource))
resource = system.NewNamespace().Resource(rbac.ResourceGarbageCollection)
assert.True(t, ctx.Can(context.TODO(), rbac.ActionCreate, resource))
}
func Test_filterRobotPolicies(t *testing.T) { func Test_filterRobotPolicies(t *testing.T) {
type args struct { type args struct {
p *proModels.Project p *proModels.Project

View File

@ -77,7 +77,7 @@ func (s *SecurityContext) IsSolutionUser() bool {
// Can returns whether the user can do action on resource // Can returns whether the user can do action on resource
// returns true if the corresponding user of the secret // returns true if the corresponding user of the secret
// is jobservice or core service, otherwise returns false // is jobservice or core service, otherwise returns false
func (s *SecurityContext) Can(ctx context.Context, action types.Action, resource types.Resource) bool { func (s *SecurityContext) Can(_ context.Context, _ types.Action, _ types.Resource) bool {
if s.store == nil { if s.store == nil {
return false return false
} }

View File

@ -63,7 +63,7 @@ func (t *tokenSecurityCtx) GetMyProjects() ([]*models.Project, error) {
return []*models.Project{}, nil return []*models.Project{}, nil
} }
func (t *tokenSecurityCtx) GetProjectRoles(projectIDOrName interface{}) []int { func (t *tokenSecurityCtx) GetProjectRoles(_ interface{}) []int {
return []int{} return []int{}
} }

View File

@ -30,7 +30,7 @@ type GCResult struct {
} }
// NewRegistryCtl returns a mock registry server // NewRegistryCtl returns a mock registry server
func NewRegistryCtl(config map[string]interface{}) (*httptest.Server, error) { func NewRegistryCtl(_ map[string]interface{}) (*httptest.Server, error) {
m := []*RequestHandlerMapping{} m := []*RequestHandlerMapping{}
gcr := GCResult{true, "hello-world", time.Now(), time.Now()} gcr := GCResult{true, "hello-world", time.Now(), time.Now()}

View File

@ -49,7 +49,7 @@ func (fc *FakeClient) GetUserInfo(token string) (*UserInfo, error) {
} }
// UpdateConfig ... // UpdateConfig ...
func (fc *FakeClient) UpdateConfig(cfg *ClientConfig) error { func (fc *FakeClient) UpdateConfig(_ *ClientConfig) error {
return nil return nil
} }

View File

@ -247,16 +247,6 @@ func IsIllegalLength(s string, min int, max int) bool {
return (len(s) < min || len(s) > max) return (len(s) < min || len(s) > max)
} }
// IsContainIllegalChar ...
func IsContainIllegalChar(s string, illegalChar []string) bool {
for _, c := range illegalChar {
if strings.Contains(s, c) {
return true
}
}
return false
}
// ParseJSONInt ... // ParseJSONInt ...
func ParseJSONInt(value interface{}) (int, bool) { func ParseJSONInt(value interface{}) (int, bool) {
switch v := value.(type) { switch v := value.(type) {
@ -342,3 +332,8 @@ func MostMatchSorter(a, b string, matchWord string) bool {
} }
return len(a) < len(b) return len(a) < len(b)
} }
// IsLocalPath checks if path is local
func IsLocalPath(path string) bool {
return strings.HasPrefix(path, "/") && !strings.HasPrefix(path, "//")
}

View File

@ -486,3 +486,25 @@ func TestValidateCronString(t *testing.T) {
} }
} }
} }
func TestIsLocalPath(t *testing.T) {
type args struct {
path string
}
tests := []struct {
name string
args args
want bool
}{
{"normal test", args{"/harbor/project"}, true},
{"failed", args{"www.myexample.com"}, false},
{"other_site1", args{"//www.myexample.com"}, false},
{"other_site2", args{"https://www.myexample.com"}, false},
{"other_site", args{"http://www.myexample.com"}, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equalf(t, tt.want, IsLocalPath(tt.args.path), "IsLocalPath(%v)", tt.args.path)
})
}
}

View File

@ -39,7 +39,7 @@ type v1alpha1Parser struct {
regCli reg.Client regCli reg.Client
} }
func (p *v1alpha1Parser) Parse(ctx context.Context, artifact *artifact.Artifact, manifest []byte) error { func (p *v1alpha1Parser) Parse(_ context.Context, artifact *artifact.Artifact, manifest []byte) error {
if artifact.ManifestMediaType != v1.MediaTypeImageManifest && artifact.ManifestMediaType != schema2.MediaTypeManifest { if artifact.ManifestMediaType != v1.MediaTypeImageManifest && artifact.ManifestMediaType != schema2.MediaTypeManifest {
return nil return nil
} }

View File

@ -227,6 +227,7 @@ func (c *controller) ensureArtifact(ctx context.Context, repository, digest stri
if !errors.IsConflictErr(err) { if !errors.IsConflictErr(err) {
return false, nil, err return false, nil, err
} }
log.Debugf("failed to create artifact %s@%s: %v", repository, digest, err)
// if got conflict error, try to get the artifact again // if got conflict error, try to get the artifact again
artifact, err = c.artMgr.GetByDigest(ctx, repository, digest) artifact, err = c.artMgr.GetByDigest(ctx, repository, digest)
if err != nil { if err != nil {

View File

@ -237,6 +237,21 @@ func (c *controllerTestSuite) TestEnsureArtifact() {
c.Require().Nil(err) c.Require().Nil(err)
c.True(created) c.True(created)
c.Equal(int64(1), art.ID) c.Equal(int64(1), art.ID)
// reset the mock
c.SetupTest()
// the artifact doesn't exist and get a conflict error on creating the artifact and fail to get again
c.repoMgr.On("GetByName", mock.Anything, mock.Anything).Return(&repomodel.RepoRecord{
ProjectID: 1,
}, nil)
c.artMgr.On("GetByDigest", mock.Anything, mock.Anything, mock.Anything).Return(nil, errors.NotFoundError(nil))
c.artMgr.On("Create", mock.Anything, mock.Anything).Return(int64(1), errors.ConflictError(nil))
c.abstractor.On("AbstractMetadata").Return(nil)
created, art, err = c.ctl.ensureArtifact(orm.NewContext(nil, &ormtesting.FakeOrmer{}), "library/hello-world", digest)
c.Require().Error(err, errors.NotFoundError(nil))
c.False(created)
c.Require().Nil(art)
} }
func (c *controllerTestSuite) TestEnsure() { func (c *controllerTestSuite) TestEnsure() {

View File

@ -37,22 +37,22 @@ type IndexProcessor struct {
} }
// AbstractMetadata abstracts metadata of artifact // AbstractMetadata abstracts metadata of artifact
func (m *IndexProcessor) AbstractMetadata(ctx context.Context, artifact *artifact.Artifact, content []byte) error { func (m *IndexProcessor) AbstractMetadata(_ context.Context, _ *artifact.Artifact, _ []byte) error {
return nil return nil
} }
// AbstractAddition abstracts the addition of artifact // AbstractAddition abstracts the addition of artifact
func (m *IndexProcessor) AbstractAddition(ctx context.Context, artifact *artifact.Artifact, addition string) (*processor.Addition, error) { func (m *IndexProcessor) AbstractAddition(_ context.Context, _ *artifact.Artifact, addition string) (*processor.Addition, error) {
return nil, errors.New(nil).WithCode(errors.BadRequestCode). return nil, errors.New(nil).WithCode(errors.BadRequestCode).
WithMessage("addition %s isn't supported", addition) WithMessage("addition %s isn't supported", addition)
} }
// GetArtifactType returns the artifact type // GetArtifactType returns the artifact type
func (m *IndexProcessor) GetArtifactType(ctx context.Context, artifact *artifact.Artifact) string { func (m *IndexProcessor) GetArtifactType(_ context.Context, _ *artifact.Artifact) string {
return "" return ""
} }
// ListAdditionTypes returns the supported addition types // ListAdditionTypes returns the supported addition types
func (m *IndexProcessor) ListAdditionTypes(ctx context.Context, artifact *artifact.Artifact) []string { func (m *IndexProcessor) ListAdditionTypes(_ context.Context, _ *artifact.Artifact) []string {
return nil return nil
} }

View File

@ -64,23 +64,23 @@ func (m *ManifestProcessor) AbstractMetadata(ctx context.Context, artifact *arti
} }
// AbstractAddition abstracts the addition of artifact // AbstractAddition abstracts the addition of artifact
func (m *ManifestProcessor) AbstractAddition(ctx context.Context, artifact *artifact.Artifact, addition string) (*processor.Addition, error) { func (m *ManifestProcessor) AbstractAddition(_ context.Context, _ *artifact.Artifact, addition string) (*processor.Addition, error) {
return nil, errors.New(nil).WithCode(errors.BadRequestCode). return nil, errors.New(nil).WithCode(errors.BadRequestCode).
WithMessage("addition %s isn't supported", addition) WithMessage("addition %s isn't supported", addition)
} }
// GetArtifactType returns the artifact type // GetArtifactType returns the artifact type
func (m *ManifestProcessor) GetArtifactType(ctx context.Context, artifact *artifact.Artifact) string { func (m *ManifestProcessor) GetArtifactType(_ context.Context, _ *artifact.Artifact) string {
return "" return ""
} }
// ListAdditionTypes returns the supported addition types // ListAdditionTypes returns the supported addition types
func (m *ManifestProcessor) ListAdditionTypes(ctx context.Context, artifact *artifact.Artifact) []string { func (m *ManifestProcessor) ListAdditionTypes(_ context.Context, _ *artifact.Artifact) []string {
return nil return nil
} }
// UnmarshalConfig unmarshal the config blob of the artifact into the specified object "v" // UnmarshalConfig unmarshal the config blob of the artifact into the specified object "v"
func (m *ManifestProcessor) UnmarshalConfig(ctx context.Context, repository string, manifest []byte, v interface{}) error { func (m *ManifestProcessor) UnmarshalConfig(_ context.Context, repository string, manifest []byte, v interface{}) error {
// unmarshal manifest // unmarshal manifest
mani := &v1.Manifest{} mani := &v1.Manifest{}
if err := json.Unmarshal(manifest, mani); err != nil { if err := json.Unmarshal(manifest, mani); err != nil {

View File

@ -58,7 +58,7 @@ type processor struct {
chartOperator chart.Operator chartOperator chart.Operator
} }
func (p *processor) AbstractAddition(ctx context.Context, artifact *artifact.Artifact, addition string) (*ps.Addition, error) { func (p *processor) AbstractAddition(_ context.Context, artifact *artifact.Artifact, addition string) (*ps.Addition, error) {
if addition != AdditionTypeValues && addition != AdditionTypeReadme && addition != AdditionTypeDependencies { if addition != AdditionTypeValues && addition != AdditionTypeReadme && addition != AdditionTypeDependencies {
return nil, errors.New(nil).WithCode(errors.BadRequestCode). return nil, errors.New(nil).WithCode(errors.BadRequestCode).
WithMessage("addition %s isn't supported for %s", addition, ArtifactTypeChart) WithMessage("addition %s isn't supported for %s", addition, ArtifactTypeChart)
@ -122,10 +122,10 @@ func (p *processor) AbstractAddition(ctx context.Context, artifact *artifact.Art
return nil, nil return nil, nil
} }
func (p *processor) GetArtifactType(ctx context.Context, artifact *artifact.Artifact) string { func (p *processor) GetArtifactType(_ context.Context, _ *artifact.Artifact) string {
return ArtifactTypeChart return ArtifactTypeChart
} }
func (p *processor) ListAdditionTypes(ctx context.Context, artifact *artifact.Artifact) []string { func (p *processor) ListAdditionTypes(_ context.Context, _ *artifact.Artifact) []string {
return []string{AdditionTypeValues, AdditionTypeReadme, AdditionTypeDependencies} return []string{AdditionTypeValues, AdditionTypeReadme, AdditionTypeDependencies}
} }

View File

@ -45,7 +45,7 @@ type processor struct {
manifestProcessor *base.ManifestProcessor manifestProcessor *base.ManifestProcessor
} }
func (p *processor) AbstractMetadata(ctx context.Context, art *artifact.Artifact, manifest []byte) error { func (p *processor) AbstractMetadata(ctx context.Context, art *artifact.Artifact, _ []byte) error {
cfgManiDgt := "" cfgManiDgt := ""
// try to get the digest of the manifest that the config layer is referenced by // try to get the digest of the manifest that the config layer is referenced by
for _, reference := range art.References { for _, reference := range art.References {
@ -72,6 +72,6 @@ func (p *processor) AbstractMetadata(ctx context.Context, art *artifact.Artifact
return p.manifestProcessor.AbstractMetadata(ctx, art, payload) return p.manifestProcessor.AbstractMetadata(ctx, art, payload)
} }
func (p *processor) GetArtifactType(ctx context.Context, artifact *artifact.Artifact) string { func (p *processor) GetArtifactType(_ context.Context, _ *artifact.Artifact) string {
return ArtifactTypeCNAB return ArtifactTypeCNAB
} }

View File

@ -48,7 +48,7 @@ type defaultProcessor struct {
regCli registry.Client regCli registry.Client
} }
func (d *defaultProcessor) GetArtifactType(ctx context.Context, artifact *artifact.Artifact) string { func (d *defaultProcessor) GetArtifactType(_ context.Context, artifact *artifact.Artifact) string {
// try to parse the type from the media type // try to parse the type from the media type
strs := artifactTypeRegExp.FindStringSubmatch(artifact.MediaType) strs := artifactTypeRegExp.FindStringSubmatch(artifact.MediaType)
if len(strs) == 2 { if len(strs) == 2 {
@ -57,7 +57,7 @@ func (d *defaultProcessor) GetArtifactType(ctx context.Context, artifact *artifa
// can not get the artifact type from the media type, return unknown // can not get the artifact type from the media type, return unknown
return ArtifactTypeUnknown return ArtifactTypeUnknown
} }
func (d *defaultProcessor) ListAdditionTypes(ctx context.Context, artifact *artifact.Artifact) []string { func (d *defaultProcessor) ListAdditionTypes(_ context.Context, _ *artifact.Artifact) []string {
return nil return nil
} }
@ -127,7 +127,7 @@ func (d *defaultProcessor) AbstractMetadata(ctx context.Context, artifact *artif
return nil return nil
} }
func (d *defaultProcessor) AbstractAddition(ctx context.Context, artifact *artifact.Artifact, addition string) (*Addition, error) { func (d *defaultProcessor) AbstractAddition(_ context.Context, artifact *artifact.Artifact, _ string) (*Addition, error) {
// Addition not support for user-defined artifact yet. // Addition not support for user-defined artifact yet.
// It will be support in the future. // It will be support in the future.
// return error directly // return error directly

View File

@ -44,6 +44,6 @@ type indexProcessor struct {
*base.IndexProcessor *base.IndexProcessor
} }
func (i *indexProcessor) GetArtifactType(ctx context.Context, artifact *artifact.Artifact) string { func (i *indexProcessor) GetArtifactType(_ context.Context, _ *artifact.Artifact) string {
return ArtifactTypeImage return ArtifactTypeImage
} }

View File

@ -38,7 +38,7 @@ func init() {
type manifestV1Processor struct { type manifestV1Processor struct {
} }
func (m *manifestV1Processor) AbstractMetadata(ctx context.Context, artifact *artifact.Artifact, manifest []byte) error { func (m *manifestV1Processor) AbstractMetadata(_ context.Context, artifact *artifact.Artifact, manifest []byte) error {
mani := &schema1.Manifest{} mani := &schema1.Manifest{}
if err := json.Unmarshal(manifest, mani); err != nil { if err := json.Unmarshal(manifest, mani); err != nil {
return err return err
@ -50,15 +50,15 @@ func (m *manifestV1Processor) AbstractMetadata(ctx context.Context, artifact *ar
return nil return nil
} }
func (m *manifestV1Processor) AbstractAddition(ctx context.Context, artifact *artifact.Artifact, addition string) (*processor.Addition, error) { func (m *manifestV1Processor) AbstractAddition(_ context.Context, _ *artifact.Artifact, addition string) (*processor.Addition, error) {
return nil, errors.New(nil).WithCode(errors.BadRequestCode). return nil, errors.New(nil).WithCode(errors.BadRequestCode).
WithMessage("addition %s isn't supported for %s(manifest version 1)", addition, ArtifactTypeImage) WithMessage("addition %s isn't supported for %s(manifest version 1)", addition, ArtifactTypeImage)
} }
func (m *manifestV1Processor) GetArtifactType(ctx context.Context, artifact *artifact.Artifact) string { func (m *manifestV1Processor) GetArtifactType(_ context.Context, _ *artifact.Artifact) string {
return ArtifactTypeImage return ArtifactTypeImage
} }
func (m *manifestV1Processor) ListAdditionTypes(ctx context.Context, artifact *artifact.Artifact) []string { func (m *manifestV1Processor) ListAdditionTypes(_ context.Context, _ *artifact.Artifact) []string {
return nil return nil
} }

View File

@ -112,10 +112,10 @@ func (m *manifestV2Processor) AbstractAddition(ctx context.Context, artifact *ar
}, nil }, nil
} }
func (m *manifestV2Processor) GetArtifactType(ctx context.Context, artifact *artifact.Artifact) string { func (m *manifestV2Processor) GetArtifactType(_ context.Context, _ *artifact.Artifact) string {
return ArtifactTypeImage return ArtifactTypeImage
} }
func (m *manifestV2Processor) ListAdditionTypes(ctx context.Context, artifact *artifact.Artifact) []string { func (m *manifestV2Processor) ListAdditionTypes(_ context.Context, _ *artifact.Artifact) []string {
return []string{AdditionTypeBuildHistory} return []string{AdditionTypeBuildHistory}
} }

View File

@ -128,10 +128,10 @@ func (m *Processor) AbstractAddition(ctx context.Context, artifact *artifact.Art
}, nil }, nil
} }
func (m *Processor) GetArtifactType(ctx context.Context, artifact *artifact.Artifact) string { func (m *Processor) GetArtifactType(_ context.Context, _ *artifact.Artifact) string {
return ArtifactTypeWASM return ArtifactTypeWASM
} }
func (m *Processor) ListAdditionTypes(ctx context.Context, artifact *artifact.Artifact) []string { func (m *Processor) ListAdditionTypes(_ context.Context, _ *artifact.Artifact) []string {
return []string{AdditionTypeBuildHistory} return []string{AdditionTypeBuildHistory}
} }

View File

@ -169,7 +169,7 @@ func verifySkipAuditLogCfg(ctx context.Context, cfgs map[string]interface{}, mgr
} }
// verifyValueLengthCfg verifies the cfgs which need to check the value max length to align with frontend. // verifyValueLengthCfg verifies the cfgs which need to check the value max length to align with frontend.
func verifyValueLengthCfg(ctx context.Context, cfgs map[string]interface{}) error { func verifyValueLengthCfg(_ context.Context, cfgs map[string]interface{}) error {
maxValue := maxValueLimitedByLength(common.UIMaxLengthLimitedOfNumber) maxValue := maxValueLimitedByLength(common.UIMaxLengthLimitedOfNumber)
validateCfgs := []string{ validateCfgs := []string{
common.TokenExpiration, common.TokenExpiration,

View File

@ -159,7 +159,7 @@ func (a *ArtifactEventHandler) onPull(ctx context.Context, event *event.Artifact
return nil return nil
} }
func (a *ArtifactEventHandler) updatePullTimeInCache(ctx context.Context, event *event.ArtifactEvent) { func (a *ArtifactEventHandler) updatePullTimeInCache(_ context.Context, event *event.ArtifactEvent) {
var tagName string var tagName string
if len(event.Tags) != 0 { if len(event.Tags) != 0 {
tagName = event.Tags[0] tagName = event.Tags[0]
@ -173,7 +173,7 @@ func (a *ArtifactEventHandler) updatePullTimeInCache(ctx context.Context, event
a.pullTimeStore[key] = time.Now() a.pullTimeStore[key] = time.Now()
} }
func (a *ArtifactEventHandler) addPullCountInCache(ctx context.Context, event *event.ArtifactEvent) { func (a *ArtifactEventHandler) addPullCountInCache(_ context.Context, event *event.ArtifactEvent) {
a.pullCountLock.Lock() a.pullCountLock.Lock()
defer a.pullCountLock.Unlock() defer a.pullCountLock.Unlock()

View File

@ -21,6 +21,7 @@ import (
"github.com/goharbor/harbor/src/controller/immutable" "github.com/goharbor/harbor/src/controller/immutable"
"github.com/goharbor/harbor/src/controller/retention" "github.com/goharbor/harbor/src/controller/retention"
"github.com/goharbor/harbor/src/lib/log" "github.com/goharbor/harbor/src/lib/log"
"github.com/goharbor/harbor/src/pkg/member"
) )
// ProjectEventHandler process project event data // ProjectEventHandler process project event data
@ -39,16 +40,15 @@ func (a *ProjectEventHandler) IsStateful() bool {
func (a *ProjectEventHandler) onProjectDelete(ctx context.Context, event *event.DeleteProjectEvent) error { func (a *ProjectEventHandler) onProjectDelete(ctx context.Context, event *event.DeleteProjectEvent) error {
log.Infof("delete project id: %d", event.ProjectID) log.Infof("delete project id: %d", event.ProjectID)
// delete tag immutable if err := immutable.Ctr.DeleteImmutableRuleByProject(ctx, event.ProjectID); err != nil {
err := immutable.Ctr.DeleteImmutableRuleByProject(ctx, event.ProjectID)
if err != nil {
log.Errorf("failed to delete immutable rule, error %v", err) log.Errorf("failed to delete immutable rule, error %v", err)
} }
// delete tag retention if err := retention.Ctl.DeleteRetentionByProject(ctx, event.ProjectID); err != nil {
err = retention.Ctl.DeleteRetentionByProject(ctx, event.ProjectID)
if err != nil {
log.Errorf("failed to delete retention rule, error %v", err) log.Errorf("failed to delete retention rule, error %v", err)
} }
if err := member.Mgr.DeleteMemberByProjectID(ctx, event.ProjectID); err != nil {
log.Errorf("failed to delete project member, error %v", err)
}
return nil return nil
} }

View File

@ -21,7 +21,9 @@ import (
beegoorm "github.com/beego/beego/v2/client/orm" beegoorm "github.com/beego/beego/v2/client/orm"
"github.com/stretchr/testify/suite" "github.com/stretchr/testify/suite"
"github.com/goharbor/harbor/src/common"
common_dao "github.com/goharbor/harbor/src/common/dao" common_dao "github.com/goharbor/harbor/src/common/dao"
commonmodels "github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/controller/event" "github.com/goharbor/harbor/src/controller/event"
"github.com/goharbor/harbor/src/controller/immutable" "github.com/goharbor/harbor/src/controller/immutable"
"github.com/goharbor/harbor/src/lib/config" "github.com/goharbor/harbor/src/lib/config"
@ -29,11 +31,14 @@ import (
"github.com/goharbor/harbor/src/pkg" "github.com/goharbor/harbor/src/pkg"
"github.com/goharbor/harbor/src/pkg/artifact" "github.com/goharbor/harbor/src/pkg/artifact"
immutableModel "github.com/goharbor/harbor/src/pkg/immutable/model" immutableModel "github.com/goharbor/harbor/src/pkg/immutable/model"
"github.com/goharbor/harbor/src/pkg/member"
memberModels "github.com/goharbor/harbor/src/pkg/member/models"
"github.com/goharbor/harbor/src/pkg/project" "github.com/goharbor/harbor/src/pkg/project"
"github.com/goharbor/harbor/src/pkg/project/models" "github.com/goharbor/harbor/src/pkg/project/models"
"github.com/goharbor/harbor/src/pkg/repository/model" "github.com/goharbor/harbor/src/pkg/repository/model"
"github.com/goharbor/harbor/src/pkg/tag" "github.com/goharbor/harbor/src/pkg/tag"
tagmodel "github.com/goharbor/harbor/src/pkg/tag/model/tag" tagmodel "github.com/goharbor/harbor/src/pkg/tag/model/tag"
"github.com/goharbor/harbor/src/pkg/user"
) )
// ProjectHandlerTestSuite is test suite for artifact handler. // ProjectHandlerTestSuite is test suite for artifact handler.
@ -81,6 +86,18 @@ func (suite *ProjectHandlerTestSuite) TestOnProjectDelete() {
projID, err := project.New().Create(suite.ctx, &models.Project{Name: "test-project", OwnerID: 1}) projID, err := project.New().Create(suite.ctx, &models.Project{Name: "test-project", OwnerID: 1})
suite.Nil(err) suite.Nil(err)
userID, err := user.Mgr.Create(suite.ctx, &commonmodels.User{Username: "test-user-event", Email: "test-user-event@example.com"})
defer user.Mgr.Delete(suite.ctx, userID)
// create project member
_, err = member.Mgr.AddProjectMember(suite.ctx, memberModels.Member{ProjectID: projID, EntityType: common.UserMember, EntityID: userID, Role: 1})
suite.Nil(err)
// verify project member
members, err := member.Mgr.SearchMemberByName(suite.ctx, projID, "test-user-event")
suite.Nil(err)
suite.Equal(1, len(members))
defer project.New().Delete(suite.ctx, projID) defer project.New().Delete(suite.ctx, projID)
immutableRule := &immutableModel.Metadata{ immutableRule := &immutableModel.Metadata{
ProjectID: projID, ProjectID: projID,
@ -116,6 +133,11 @@ func (suite *ProjectHandlerTestSuite) TestOnProjectDelete() {
// check if immutable rule is deleted // check if immutable rule is deleted
_, err = immutable.Ctr.GetImmutableRule(suite.ctx, immutableID) _, err = immutable.Ctr.GetImmutableRule(suite.ctx, immutableID)
suite.NotNil(err) suite.NotNil(err)
// check if project member is deleted
mbs, err := member.Mgr.SearchMemberByName(suite.ctx, projID, "test-user-event")
suite.Nil(err)
suite.Equal(0, len(mbs))
} }
// TestArtifactHandler tests ArtifactHandler. // TestArtifactHandler tests ArtifactHandler.

View File

@ -52,7 +52,7 @@ func gcCallback(ctx context.Context, p string) error {
return err return err
} }
func gcTaskStatusChange(ctx context.Context, taskID int64, status string) error { func gcTaskStatusChange(ctx context.Context, _ int64, status string) error {
if status == job.SuccessStatus.String() && config.QuotaPerProjectEnable(ctx) { if status == job.SuccessStatus.String() && config.QuotaPerProjectEnable(ctx) {
go func() { go func() {
err := quota.RefreshForProjects(orm.Context()) err := quota.RefreshForProjects(orm.Context())

View File

@ -41,7 +41,7 @@ type Controller interface {
type controller struct{} type controller struct{}
func (c *controller) GetHealth(ctx context.Context) *OverallHealthStatus { func (c *controller) GetHealth(_ context.Context) *OverallHealthStatus {
var isHealthy healthy = true var isHealthy healthy = true
components := []*ComponentHealthStatus{} components := []*ComponentHealthStatus{}
ch := make(chan *ComponentHealthStatus, len(registry)) ch := make(chan *ComponentHealthStatus, len(registry))

View File

@ -32,18 +32,20 @@ import (
// Controller defines the operation related to project member // Controller defines the operation related to project member
type Controller interface { type Controller interface {
// Get get the project member with ID // Get gets the project member with ID
Get(ctx context.Context, projectNameOrID interface{}, memberID int) (*models.Member, error) Get(ctx context.Context, projectNameOrID interface{}, memberID int) (*models.Member, error)
// Create add project member to project // Create add project member to project
Create(ctx context.Context, projectNameOrID interface{}, req Request) (int, error) Create(ctx context.Context, projectNameOrID interface{}, req Request) (int, error)
// Delete member from project // Delete member from project
Delete(ctx context.Context, projectNameOrID interface{}, memberID int) error Delete(ctx context.Context, projectNameOrID interface{}, memberID int) error
// List list all project members with condition // List lists all project members with condition
List(ctx context.Context, projectNameOrID interface{}, entityName string, query *q.Query) ([]*models.Member, error) List(ctx context.Context, projectNameOrID interface{}, entityName string, query *q.Query) ([]*models.Member, error)
// UpdateRole update the project member role // UpdateRole update the project member role
UpdateRole(ctx context.Context, projectNameOrID interface{}, memberID int, role int) error UpdateRole(ctx context.Context, projectNameOrID interface{}, memberID int, role int) error
// Count get the total amount of project members // Count get the total amount of project members
Count(ctx context.Context, projectNameOrID interface{}, query *q.Query) (int, error) Count(ctx context.Context, projectNameOrID interface{}, query *q.Query) (int, error)
// IsProjectAdmin judges if the user is a project admin of any project
IsProjectAdmin(ctx context.Context, memberID int) (bool, error)
} }
// Request - Project Member Request // Request - Project Member Request
@ -258,3 +260,12 @@ func (c *controller) Delete(ctx context.Context, projectNameOrID interface{}, me
} }
return c.mgr.Delete(ctx, p.ProjectID, memberID) return c.mgr.Delete(ctx, p.ProjectID, memberID)
} }
func (c *controller) IsProjectAdmin(ctx context.Context, memberID int) (bool, error) {
members, err := c.projectMgr.ListAdminRolesOfUser(ctx, memberID)
if err != nil {
return false, err
}
return len(members) > 0, nil
}

View File

@ -15,6 +15,7 @@
package member package member
import ( import (
"context"
"fmt" "fmt"
"testing" "testing"
@ -95,6 +96,13 @@ func (suite *MemberControllerTestSuite) TestAddProjectMemberWithUserGroup() {
suite.NoError(err) suite.NoError(err)
} }
func (suite *MemberControllerTestSuite) TestIsProjectAdmin() {
mock.OnAnything(suite.projectMgr, "ListAdminRolesOfUser").Return([]models.Member{models.Member{ID: 2, ProjectID: 2}}, nil)
ok, err := suite.controller.IsProjectAdmin(context.Background(), 2)
suite.NoError(err)
suite.True(ok)
}
func TestMemberControllerTestSuite(t *testing.T) { func TestMemberControllerTestSuite(t *testing.T) {
suite.Run(t, &MemberControllerTestSuite{}) suite.Run(t, &MemberControllerTestSuite{})
} }

View File

@ -495,7 +495,7 @@ func (c *controller) ListPoliciesByProject(ctx context.Context, project int64, q
} }
// CheckHealth checks the instance health, for test connection // CheckHealth checks the instance health, for test connection
func (c *controller) CheckHealth(ctx context.Context, instance *providerModels.Instance) error { func (c *controller) CheckHealth(_ context.Context, instance *providerModels.Instance) error {
if instance == nil { if instance == nil {
return errors.New("instance can not be nil") return errors.New("instance can not be nil")
} }

View File

@ -150,9 +150,9 @@ func (c *controller) Exists(ctx context.Context, projectIDOrName interface{}) (b
return true, nil return true, nil
} else if errors.IsNotFoundErr(err) { } else if errors.IsNotFoundErr(err) {
return false, nil return false, nil
} else {
return false, err
} }
// else
return false, err
} }
func (c *controller) Get(ctx context.Context, projectIDOrName interface{}, options ...Option) (*models.Project, error) { func (c *controller) Get(ctx context.Context, projectIDOrName interface{}, options ...Option) (*models.Project, error) {

View File

@ -101,7 +101,7 @@ func (c *controller) EnsureTag(ctx context.Context, art lib.ArtifactInfo, tagNam
// search the digest in cache and query with trimmed digest // search the digest in cache and query with trimmed digest
var trimmedDigest string var trimmedDigest string
err := c.cache.Fetch(ctx, TrimmedManifestlist+art.Digest, &trimmedDigest) err := c.cache.Fetch(ctx, TrimmedManifestlist+art.Digest, &trimmedDigest)
if errors.Is(err, cache.ErrNotFound) { if errors.Is(err, cache.ErrNotFound) { // nolint:revive
// skip to update digest, continue // skip to update digest, continue
} else if err != nil { } else if err != nil {
// for other error, return // for other error, return
@ -183,7 +183,10 @@ func (c *controller) UseLocalManifest(ctx context.Context, art lib.ArtifactInfo,
if c.cache == nil { if c.cache == nil {
return a != nil && string(desc.Digest) == a.Digest, nil, nil // digest matches return a != nil && string(desc.Digest) == a.Digest, nil, nil // digest matches
} }
// Pass digest to the cache key, digest is more stable than tag, because tag could be updated
if len(art.Digest) == 0 {
art.Digest = string(desc.Digest)
}
err = c.cache.Fetch(ctx, manifestListKey(art.Repository, art), &content) err = c.cache.Fetch(ctx, manifestListKey(art.Repository, art), &content)
if err != nil { if err != nil {
if errors.Is(err, cache.ErrNotFound) { if errors.Is(err, cache.ErrNotFound) {
@ -260,7 +263,7 @@ func (c *controller) ProxyManifest(ctx context.Context, art lib.ArtifactInfo, re
return man, nil return man, nil
} }
func (c *controller) HeadManifest(ctx context.Context, art lib.ArtifactInfo, remote RemoteInterface) (bool, *distribution.Descriptor, error) { func (c *controller) HeadManifest(_ context.Context, art lib.ArtifactInfo, remote RemoteInterface) (bool, *distribution.Descriptor, error) {
remoteRepo := getRemoteRepo(art) remoteRepo := getRemoteRepo(art)
ref := getReference(art) ref := getReference(art)
return remote.ManifestExist(remoteRepo, ref) return remote.ManifestExist(remoteRepo, ref)
@ -318,8 +321,8 @@ func getRemoteRepo(art lib.ArtifactInfo) string {
} }
func getReference(art lib.ArtifactInfo) string { func getReference(art lib.ArtifactInfo) string {
if len(art.Tag) > 0 { if len(art.Digest) > 0 {
return art.Tag return art.Digest
} }
return art.Digest return art.Tag
} }

View File

@ -209,7 +209,7 @@ func TestGetRef(t *testing.T) {
{ {
name: `normal`, name: `normal`,
in: lib.ArtifactInfo{Repository: "hello-world", Tag: "latest", Digest: "sha256:aabbcc"}, in: lib.ArtifactInfo{Repository: "hello-world", Tag: "latest", Digest: "sha256:aabbcc"},
want: "latest", want: "sha256:aabbcc",
}, },
{ {
name: `digest_only`, name: `digest_only`,

View File

@ -81,7 +81,7 @@ func newLocalHelper() localInterface {
return l return l
} }
func (l *localHelper) BlobExist(ctx context.Context, art lib.ArtifactInfo) (bool, error) { func (l *localHelper) BlobExist(_ context.Context, art lib.ArtifactInfo) (bool, error) {
return l.registry.BlobExist(art.Repository, art.Digest) return l.registry.BlobExist(art.Repository, art.Digest)
} }

View File

@ -63,7 +63,7 @@ type ManifestListCache struct {
} }
// CacheContent ... // CacheContent ...
func (m *ManifestListCache) CacheContent(ctx context.Context, remoteRepo string, man distribution.Manifest, art lib.ArtifactInfo, r RemoteInterface, contentType string) { func (m *ManifestListCache) CacheContent(ctx context.Context, _ string, man distribution.Manifest, art lib.ArtifactInfo, _ RemoteInterface, contentType string) {
_, payload, err := man.Payload() _, payload, err := man.Payload()
if err != nil { if err != nil {
log.Errorf("failed to get payload, error %v", err) log.Errorf("failed to get payload, error %v", err)
@ -73,7 +73,10 @@ func (m *ManifestListCache) CacheContent(ctx context.Context, remoteRepo string,
log.Errorf("failed to get reference, reference is empty, skip to cache manifest list") log.Errorf("failed to get reference, reference is empty, skip to cache manifest list")
return return
} }
// some registry will not return the digest in the HEAD request, if no digest returned, cache manifest list content with tag // cache key should contain digest if digest exist
if len(art.Digest) == 0 {
art.Digest = string(digest.FromBytes(payload))
}
key := manifestListKey(art.Repository, art) key := manifestListKey(art.Repository, art)
log.Debugf("cache manifest list with key=cache:%v", key) log.Debugf("cache manifest list with key=cache:%v", key)
if err := m.cache.Save(ctx, manifestListContentTypeKey(art.Repository, art), contentType, manifestListCacheInterval); err != nil { if err := m.cache.Save(ctx, manifestListContentTypeKey(art.Repository, art), contentType, manifestListCacheInterval); err != nil {
@ -171,7 +174,7 @@ type ManifestCache struct {
} }
// CacheContent ... // CacheContent ...
func (m *ManifestCache) CacheContent(ctx context.Context, remoteRepo string, man distribution.Manifest, art lib.ArtifactInfo, r RemoteInterface, contentType string) { func (m *ManifestCache) CacheContent(ctx context.Context, remoteRepo string, man distribution.Manifest, art lib.ArtifactInfo, r RemoteInterface, _ string) {
var waitBlobs []distribution.Descriptor var waitBlobs []distribution.Descriptor
for n := 0; n < maxManifestWait; n++ { for n := 0; n < maxManifestWait; n++ {
time.Sleep(sleepIntervalSec * time.Second) time.Sleep(sleepIntervalSec * time.Second)

View File

@ -464,7 +464,7 @@ func (c *controller) Update(ctx context.Context, u *quota.Quota) error {
} }
// Driver returns quota driver for the reference // Driver returns quota driver for the reference
func Driver(ctx context.Context, reference string) (driver.Driver, error) { func Driver(_ context.Context, reference string) (driver.Driver, error) {
d, ok := driver.Get(reference) d, ok := driver.Get(reference)
if !ok { if !ok {
return nil, fmt.Errorf("quota not support for %s", reference) return nil, fmt.Errorf("quota not support for %s", reference)

View File

@ -43,7 +43,7 @@ type driver struct {
blobCtl blob.Controller blobCtl blob.Controller
} }
func (d *driver) Enabled(ctx context.Context, key string) (bool, error) { func (d *driver) Enabled(ctx context.Context, _ string) (bool, error) {
// NOTE: every time load the new configurations from the db to get the latest configurations may have performance problem. // NOTE: every time load the new configurations from the db to get the latest configurations may have performance problem.
if err := d.cfg.Load(ctx); err != nil { if err := d.cfg.Load(ctx); err != nil {
return false, err return false, err

View File

@ -1,4 +1,4 @@
// Code generated by mockery v2.22.1. DO NOT EDIT. // Code generated by mockery v2.35.4. DO NOT EDIT.
package flow package flow
@ -56,13 +56,12 @@ func (_m *mockFactory) Create(_a0 *model.Registry) (adapter.Adapter, error) {
return r0, r1 return r0, r1
} }
type mockConstructorTestingTnewMockFactory interface { // newMockFactory creates a new instance of mockFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func newMockFactory(t interface {
mock.TestingT mock.TestingT
Cleanup(func()) Cleanup(func())
} }) *mockFactory {
// newMockFactory creates a new instance of mockFactory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func newMockFactory(t mockConstructorTestingTnewMockFactory) *mockFactory {
mock := &mockFactory{} mock := &mockFactory{}
mock.Mock.Test(t) mock.Mock.Test(t)

View File

@ -1,4 +1,4 @@
// Code generated by mockery v2.22.1. DO NOT EDIT. // Code generated by mockery v2.35.4. DO NOT EDIT.
package flow package flow
@ -438,13 +438,12 @@ func (_m *mockAdapter) PushManifest(repository string, reference string, mediaTy
return r0, r1 return r0, r1
} }
type mockConstructorTestingTnewMockAdapter interface { // newMockAdapter creates a new instance of mockAdapter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func newMockAdapter(t interface {
mock.TestingT mock.TestingT
Cleanup(func()) Cleanup(func())
} }) *mockAdapter {
// newMockAdapter creates a new instance of mockAdapter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func newMockAdapter(t mockConstructorTestingTnewMockAdapter) *mockAdapter {
mock := &mockAdapter{} mock := &mockAdapter{}
mock.Mock.Test(t) mock.Mock.Test(t)

View File

@ -1,4 +1,4 @@
// Code generated by mockery v2.22.1. DO NOT EDIT. // Code generated by mockery v2.35.4. DO NOT EDIT.
package replication package replication
@ -31,13 +31,12 @@ func (_m *flowController) Start(ctx context.Context, executionID int64, policy *
return r0 return r0
} }
type mockConstructorTestingTnewFlowController interface { // newFlowController creates a new instance of flowController. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func newFlowController(t interface {
mock.TestingT mock.TestingT
Cleanup(func()) Cleanup(func())
} }) *flowController {
// newFlowController creates a new instance of flowController. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func newFlowController(t mockConstructorTestingTnewFlowController) *flowController {
mock := &flowController{} mock := &flowController{}
mock.Mock.Test(t) mock.Mock.Test(t)

View File

@ -326,7 +326,7 @@ func (t *transfer) copyChunkWithRetry(srcRepo, dstRepo, digest string, sizeFromD
} }
// tryMountBlob try to check existence and mount, return true if mounted. // tryMountBlob try to check existence and mount, return true if mounted.
func (t *transfer) tryMountBlob(srcRepo, dstRepo, digest string) (bool, error) { func (t *transfer) tryMountBlob(_, dstRepo, digest string) (bool, error) {
if t.shouldStop() { if t.shouldStop() {
return false, errStopped return false, errStopped
} }

View File

@ -23,7 +23,10 @@ import (
"github.com/goharbor/harbor/src/controller/event/operator" "github.com/goharbor/harbor/src/controller/event/operator"
"github.com/goharbor/harbor/src/jobservice/job" "github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/jobservice/logger" "github.com/goharbor/harbor/src/jobservice/logger"
"github.com/goharbor/harbor/src/lib"
"github.com/goharbor/harbor/src/lib/orm"
"github.com/goharbor/harbor/src/lib/q" "github.com/goharbor/harbor/src/lib/q"
"github.com/goharbor/harbor/src/lib/retry"
"github.com/goharbor/harbor/src/pkg" "github.com/goharbor/harbor/src/pkg"
"github.com/goharbor/harbor/src/pkg/project" "github.com/goharbor/harbor/src/pkg/project"
"github.com/goharbor/harbor/src/pkg/repository" "github.com/goharbor/harbor/src/pkg/repository"
@ -80,6 +83,7 @@ type defaultController struct {
projectManager project.Manager projectManager project.Manager
repositoryMgr repository.Manager repositoryMgr repository.Manager
scheduler scheduler.Scheduler scheduler scheduler.Scheduler
wp *lib.WorkerPool
} }
const ( const (
@ -248,21 +252,49 @@ func (r *defaultController) TriggerRetentionExec(ctx context.Context, policyID i
"dry_run": dryRun, "dry_run": dryRun,
"operator": operator.FromContext(ctx), "operator": operator.FromContext(ctx),
} }
id, err := r.execMgr.Create(ctx, job.RetentionVendorType, policyID, trigger, extra) id, err := r.execMgr.Create(ctx, job.RetentionVendorType, policyID, trigger, extra)
if num, err := r.launcher.Launch(ctx, p, id, dryRun); err != nil { if err != nil {
if err1 := r.execMgr.StopAndWait(ctx, id, 10*time.Second); err1 != nil {
logger.Errorf("failed to stop the retention execution %d: %v", id, err1)
}
if err1 := r.execMgr.MarkError(ctx, id, err.Error()); err1 != nil {
logger.Errorf("failed to mark error for the retention execution %d: %v", id, err1)
}
return 0, err return 0, err
} else if num == 0 {
// no candidates, mark the execution as done directly
if err := r.execMgr.MarkDone(ctx, id, "no resources for retention"); err != nil {
logger.Errorf("failed to mark done for the execution %d: %v", id, err)
}
} }
go func() {
r.wp.GetWorker()
defer r.wp.ReleaseWorker()
// copy the context to request a new ormer
ctx = orm.Copy(ctx)
// as we start a new transaction in the goroutine, the execution record may not
// be inserted yet, wait until it is ready before continue
if err := retry.Retry(func() error {
_, err := r.execMgr.Get(ctx, id)
return err
}); err != nil {
markErr := r.execMgr.MarkError(ctx, id, fmt.Sprintf(
"failed to wait the execution record to be inserted: %v", err))
if markErr != nil {
logger.Errorf("failed to mark the status of execution %d to error: %v", id, markErr)
}
return
}
if num, err := r.launcher.Launch(ctx, p, id, dryRun); err != nil {
logger.Errorf("failed to launch the retention jobs, err: %v", err)
if err = r.execMgr.StopAndWait(ctx, id, 10*time.Second); err != nil {
logger.Errorf("failed to stop the retention execution %d: %v", id, err)
}
if err = r.execMgr.MarkError(ctx, id, err.Error()); err != nil {
logger.Errorf("failed to mark error for the retention execution %d: %v", id, err)
}
} else if num == 0 {
// no candidates, mark the execution as done directly
if err := r.execMgr.MarkDone(ctx, id, "no resources for retention"); err != nil {
logger.Errorf("failed to mark done for the execution %d: %v", id, err)
}
}
}()
return id, err return id, err
} }
@ -434,5 +466,6 @@ func NewController() Controller {
projectManager: pkg.ProjectMgr, projectManager: pkg.ProjectMgr,
repositoryMgr: pkg.RepositoryMgr, repositoryMgr: pkg.RepositoryMgr,
scheduler: scheduler.Sched, scheduler: scheduler.Sched,
wp: lib.NewWorkerPool(10),
} }
} }

View File

@ -25,6 +25,7 @@ import (
"github.com/goharbor/harbor/src/common/dao" "github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/jobservice/job" "github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/lib"
"github.com/goharbor/harbor/src/lib/orm" "github.com/goharbor/harbor/src/lib/orm"
"github.com/goharbor/harbor/src/lib/q" "github.com/goharbor/harbor/src/lib/q"
"github.com/goharbor/harbor/src/pkg/retention" "github.com/goharbor/harbor/src/pkg/retention"
@ -238,6 +239,7 @@ func (s *ControllerTestSuite) TestExecution() {
projectManager: projectMgr, projectManager: projectMgr,
repositoryMgr: repositoryMgr, repositoryMgr: repositoryMgr,
scheduler: retentionScheduler, scheduler: retentionScheduler,
wp: lib.NewWorkerPool(10),
} }
p1 := &policy.Metadata{ p1 := &policy.Metadata{

View File

@ -102,10 +102,6 @@ func (d *controller) Create(ctx context.Context, r *Robot) (int64, string, error
var expiresAt int64 var expiresAt int64
if r.Duration == -1 { if r.Duration == -1 {
expiresAt = -1 expiresAt = -1
} else if r.Duration == 0 {
// system default robot duration
r.Duration = int64(config.RobotTokenDuration(ctx))
expiresAt = time.Now().AddDate(0, 0, config.RobotTokenDuration(ctx)).Unix()
} else { } else {
durationStr := strconv.FormatInt(r.Duration, 10) durationStr := strconv.FormatInt(r.Duration, 10)
duration, err := strconv.Atoi(durationStr) duration, err := strconv.Atoi(durationStr)

View File

@ -925,6 +925,7 @@ func (bc *basicController) makeRobotAccount(ctx context.Context, projectID int64
Name: fmt.Sprintf("%s-%s-%s", scannerPrefix, registration.Name, UUID), Name: fmt.Sprintf("%s-%s-%s", scannerPrefix, registration.Name, UUID),
Description: "for scan", Description: "for scan",
ProjectID: projectID, ProjectID: projectID,
Duration: -1,
}, },
Level: robot.LEVELPROJECT, Level: robot.LEVELPROJECT,
Permissions: []*robot.Permission{ Permissions: []*robot.Permission{

View File

@ -199,6 +199,7 @@ func (suite *ControllerTestSuite) SetupSuite() {
Name: rname, Name: rname,
Description: "for scan", Description: "for scan",
ProjectID: suite.artifact.ProjectID, ProjectID: suite.artifact.ProjectID,
Duration: -1,
}, },
Level: robot.LEVELPROJECT, Level: robot.LEVELPROJECT,
Permissions: []*robot.Permission{ Permissions: []*robot.Permission{
@ -229,6 +230,7 @@ func (suite *ControllerTestSuite) SetupSuite() {
Secret: "robot-account", Secret: "robot-account",
Description: "for scan", Description: "for scan",
ProjectID: suite.artifact.ProjectID, ProjectID: suite.artifact.ProjectID,
Duration: -1,
}, },
Level: "project", Level: "project",
}, nil) }, nil)
@ -336,7 +338,7 @@ func (suite *ControllerTestSuite) TestScanControllerScan() {
mock.OnAnything(suite.execMgr, "Create").Return(int64(1), nil).Once() mock.OnAnything(suite.execMgr, "Create").Return(int64(1), nil).Once()
mock.OnAnything(suite.taskMgr, "Create").Return(int64(1), nil).Once() mock.OnAnything(suite.taskMgr, "Create").Return(int64(1), nil).Once()
ctx := orm.NewContext(nil, &ormtesting.FakeOrmer{}) ctx := orm.NewContext(context.TODO(), &ormtesting.FakeOrmer{})
suite.Require().NoError(suite.c.Scan(ctx, suite.artifact)) suite.Require().NoError(suite.c.Scan(ctx, suite.artifact))
} }

View File

@ -86,8 +86,8 @@ func NewController() Controller {
func (c *controller) SecuritySummary(ctx context.Context, projectID int64, options ...Option) (*secHubModel.Summary, error) { func (c *controller) SecuritySummary(ctx context.Context, projectID int64, options ...Option) (*secHubModel.Summary, error) {
opts := newOptions(options...) opts := newOptions(options...)
scannerUUID, err := c.scannerMgr.DefaultScannerUUID(ctx) scannerUUID, err := c.scannerMgr.DefaultScannerUUID(ctx)
if err != nil { if len(scannerUUID) == 0 || err != nil {
return nil, err return &secHubModel.Summary{}, nil
} }
sum, err := c.secHubMgr.Summary(ctx, scannerUUID, projectID, nil) sum, err := c.secHubMgr.Summary(ctx, scannerUUID, projectID, nil)
if err != nil { if err != nil {

View File

@ -37,7 +37,7 @@ func init() {
} }
} }
func cleanupCallBack(ctx context.Context, param string) error { func cleanupCallBack(ctx context.Context, _ string) error {
err := cleanupController.Start(ctx, true, task.ExecutionTriggerSchedule) err := cleanupController.Start(ctx, true, task.ExecutionTriggerSchedule)
if err != nil { if err != nil {
logger.Errorf("System artifact cleanup job encountered errors: %v", err) logger.Errorf("System artifact cleanup job encountered errors: %v", err)

View File

@ -50,6 +50,7 @@ type Data struct {
BannerMessage string BannerMessage string
AuthProxySettings *models.HTTPAuthProxy AuthProxySettings *models.HTTPAuthProxy
Protected *protectedData Protected *protectedData
OIDCProviderName string
} }
type protectedData struct { type protectedData struct {
@ -103,6 +104,7 @@ func (c *controller) GetInfo(ctx context.Context, opt Options) (*Data, error) {
SelfRegistration: utils.SafeCastBool(cfg[common.SelfRegistration]), SelfRegistration: utils.SafeCastBool(cfg[common.SelfRegistration]),
HarborVersion: fmt.Sprintf("%s-%s", version.ReleaseVersion, version.GitCommit), HarborVersion: fmt.Sprintf("%s-%s", version.ReleaseVersion, version.GitCommit),
BannerMessage: utils.SafeCastString(mgr.Get(ctx, common.BannerMessage).GetString()), BannerMessage: utils.SafeCastString(mgr.Get(ctx, common.BannerMessage).GetString()),
OIDCProviderName: OIDCProviderName(cfg),
} }
if res.AuthMode == common.HTTPAuth { if res.AuthMode == common.HTTPAuth {
if s, err := config.HTTPAuthProxySetting(ctx); err == nil { if s, err := config.HTTPAuthProxySetting(ctx); err == nil {
@ -137,7 +139,15 @@ func (c *controller) GetInfo(ctx context.Context, opt Options) (*Data, error) {
return res, nil return res, nil
} }
func (c *controller) GetCapacity(ctx context.Context) (*imagestorage.Capacity, error) { func OIDCProviderName(cfg map[string]interface{}) string {
authMode := utils.SafeCastString(cfg[common.AUTHMode])
if authMode != common.OIDCAuth {
return ""
}
return utils.SafeCastString(cfg[common.OIDCName])
}
func (c *controller) GetCapacity(_ context.Context) (*imagestorage.Capacity, error) {
systeminfo.Init() systeminfo.Init()
return imagestorage.GlobalDriver.Cap() return imagestorage.GlobalDriver.Cap()
} }
@ -148,14 +158,15 @@ func (c *controller) GetCA(ctx context.Context) (io.ReadCloser, error) {
if len(testRootCertPath) > 0 { if len(testRootCertPath) > 0 {
path = testRootCertPath path = testRootCertPath
} }
if _, err := os.Stat(path); err == nil { _, err := os.Stat(path)
if err == nil {
return os.Open(path) return os.Open(path)
} else if os.IsNotExist(err) { } else if os.IsNotExist(err) {
return nil, errors.NotFoundError(fmt.Errorf("cert not found in path: %s", path)) return nil, errors.NotFoundError(fmt.Errorf("cert not found in path: %s", path))
} else {
logger.Errorf("Failed to stat the cert, path: %s, error: %v", path, err)
return nil, err
} }
// else
logger.Errorf("Failed to stat the cert, path: %s, error: %v", path, err)
return nil, err
} }
// NewController return an instance of controller // NewController return an instance of controller

View File

@ -105,3 +105,25 @@ func (s *sysInfoCtlTestSuite) TestGetInfo() {
func TestControllerSuite(t *testing.T) { func TestControllerSuite(t *testing.T) {
suite.Run(t, &sysInfoCtlTestSuite{}) suite.Run(t, &sysInfoCtlTestSuite{})
} }
func TestOIDCProviderName(t *testing.T) {
type args struct {
cfg map[string]interface{}
}
tests := []struct {
name string
args args
want string
}{
{"normal testing", args{map[string]interface{}{common.AUTHMode: common.OIDCAuth, common.OIDCName: "test"}}, "test"},
{"not oidc", args{map[string]interface{}{common.AUTHMode: common.DBAuth, common.OIDCName: "test"}}, ""},
{"empty provider", args{map[string]interface{}{common.AUTHMode: common.OIDCAuth, common.OIDCName: ""}}, ""},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := OIDCProviderName(tt.args.cfg); got != tt.want {
t.Errorf("OIDCProviderName() = %v, want %v", got, tt.want)
}
})
}
}

View File

@ -52,7 +52,7 @@ func init() {
} }
} }
func sweepCallback(ctx context.Context, p string) error { func sweepCallback(ctx context.Context, _ string) error {
params := &SweepParams{ExecRetainCounts: job.GetExecutionSweeperCount()} params := &SweepParams{ExecRetainCounts: job.GetExecutionSweeperCount()}
return SweepCtl.Start(ctx, params, task.ExecutionTriggerSchedule) return SweepCtl.Start(ctx, params, task.ExecutionTriggerSchedule)
} }

View File

@ -21,12 +21,15 @@ import (
commonmodels "github.com/goharbor/harbor/src/common/models" commonmodels "github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/security" "github.com/goharbor/harbor/src/common/security"
"github.com/goharbor/harbor/src/common/security/local" "github.com/goharbor/harbor/src/common/security/local"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/jobservice/job/impl/gdpr"
"github.com/goharbor/harbor/src/lib" "github.com/goharbor/harbor/src/lib"
"github.com/goharbor/harbor/src/lib/config" "github.com/goharbor/harbor/src/lib/config"
"github.com/goharbor/harbor/src/lib/errors" "github.com/goharbor/harbor/src/lib/errors"
"github.com/goharbor/harbor/src/lib/q" "github.com/goharbor/harbor/src/lib/q"
"github.com/goharbor/harbor/src/pkg/member" "github.com/goharbor/harbor/src/pkg/member"
"github.com/goharbor/harbor/src/pkg/oidc" "github.com/goharbor/harbor/src/pkg/oidc"
"github.com/goharbor/harbor/src/pkg/task"
"github.com/goharbor/harbor/src/pkg/user" "github.com/goharbor/harbor/src/pkg/user"
"github.com/goharbor/harbor/src/pkg/user/models" "github.com/goharbor/harbor/src/pkg/user/models"
) )
@ -76,6 +79,8 @@ func NewController() Controller {
mgr: user.New(), mgr: user.New(),
oidcMetaMgr: oidc.NewMetaMgr(), oidcMetaMgr: oidc.NewMetaMgr(),
memberMgr: member.Mgr, memberMgr: member.Mgr,
taskMgr: task.NewManager(),
exeMgr: task.NewExecutionManager(),
} }
} }
@ -88,6 +93,8 @@ type controller struct {
mgr user.Manager mgr user.Manager
oidcMetaMgr oidc.MetaManager oidcMetaMgr oidc.MetaManager
memberMgr member.Manager memberMgr member.Manager
taskMgr task.Manager
exeMgr task.ExecutionManager
} }
func (c *controller) UpdateOIDCMeta(ctx context.Context, ou *commonmodels.OIDCUser, cols ...string) error { func (c *controller) UpdateOIDCMeta(ctx context.Context, ou *commonmodels.OIDCUser, cols ...string) error {
@ -183,10 +190,36 @@ func (c *controller) Delete(ctx context.Context, id int) error {
if err != nil { if err != nil {
return errors.UnknownError(err).WithMessage("failed to load GDPR setting: %v", err) return errors.UnknownError(err).WithMessage("failed to load GDPR setting: %v", err)
} }
if gdprSetting.DeleteUser {
return c.mgr.DeleteGDPR(ctx, id) if gdprSetting.AuditLogs {
userDb, err := c.mgr.Get(ctx, id)
if err != nil {
return errors.Wrap(err, "unable to get user information")
}
params := map[string]interface{}{
gdpr.UserNameParam: userDb.Username,
}
execID, err := c.exeMgr.Create(ctx, job.AuditLogsGDPRCompliantVendorType, -1, task.ExecutionTriggerEvent, params)
if err != nil {
return err
}
_, err = c.taskMgr.Create(ctx, execID, &task.Job{
Name: job.AuditLogsGDPRCompliantVendorType,
Metadata: &job.Metadata{
JobKind: job.KindGeneric,
},
Parameters: params,
})
if err != nil {
return err
}
} }
return c.mgr.Delete(ctx, id) if gdprSetting.DeleteUser {
err = c.mgr.DeleteGDPR(ctx, id)
} else {
err = c.mgr.Delete(ctx, id)
}
return err
} }
func (c *controller) List(ctx context.Context, query *q.Query, options ...models.Option) ([]*commonmodels.User, error) { func (c *controller) List(ctx context.Context, query *q.Query, options ...models.Option) ([]*commonmodels.User, error) {

View File

@ -88,35 +88,35 @@ type DefaultAuthenticateHelper struct {
} }
// Authenticate ... // Authenticate ...
func (d *DefaultAuthenticateHelper) Authenticate(ctx context.Context, m models.AuthModel) (*models.User, error) { func (d *DefaultAuthenticateHelper) Authenticate(_ context.Context, _ models.AuthModel) (*models.User, error) {
return nil, ErrNotSupported return nil, ErrNotSupported
} }
// OnBoardUser will check if a user exists in user table, if not insert the user and // OnBoardUser will check if a user exists in user table, if not insert the user and
// put the id in the pointer of user model, if it does exist, fill in the user model based // put the id in the pointer of user model, if it does exist, fill in the user model based
// on the data record of the user // on the data record of the user
func (d *DefaultAuthenticateHelper) OnBoardUser(ctx context.Context, u *models.User) error { func (d *DefaultAuthenticateHelper) OnBoardUser(_ context.Context, _ *models.User) error {
return ErrNotSupported return ErrNotSupported
} }
// SearchUser - Get user information from account repository // SearchUser - Get user information from account repository
func (d *DefaultAuthenticateHelper) SearchUser(ctx context.Context, username string) (*models.User, error) { func (d *DefaultAuthenticateHelper) SearchUser(_ context.Context, username string) (*models.User, error) {
log.Errorf("Not support searching user, username: %s", username) log.Errorf("Not support searching user, username: %s", username)
return nil, libErrors.NotFoundError(ErrNotSupported).WithMessage("%s not found", username) return nil, libErrors.NotFoundError(ErrNotSupported).WithMessage("%s not found", username)
} }
// PostAuthenticate - Update user information after authenticate, such as OnBoard or sync info etc // PostAuthenticate - Update user information after authenticate, such as OnBoard or sync info etc
func (d *DefaultAuthenticateHelper) PostAuthenticate(ctx context.Context, u *models.User) error { func (d *DefaultAuthenticateHelper) PostAuthenticate(_ context.Context, _ *models.User) error {
return nil return nil
} }
// OnBoardGroup - OnBoardGroup, it will set the ID of the user group, if altGroupName is not empty, take the altGroupName as groupName in harbor DB. // OnBoardGroup - OnBoardGroup, it will set the ID of the user group, if altGroupName is not empty, take the altGroupName as groupName in harbor DB.
func (d *DefaultAuthenticateHelper) OnBoardGroup(ctx context.Context, u *model.UserGroup, altGroupName string) error { func (d *DefaultAuthenticateHelper) OnBoardGroup(_ context.Context, _ *model.UserGroup, _ string) error {
return ErrNotSupported return ErrNotSupported
} }
// SearchGroup - Search ldap group by group key, groupKey is the unique attribute of group in authenticator, for LDAP, the key is group DN // SearchGroup - Search ldap group by group key, groupKey is the unique attribute of group in authenticator, for LDAP, the key is group DN
func (d *DefaultAuthenticateHelper) SearchGroup(ctx context.Context, groupKey string) (*model.UserGroup, error) { func (d *DefaultAuthenticateHelper) SearchGroup(_ context.Context, groupKey string) (*model.UserGroup, error) {
log.Errorf("Not support searching group, group key: %s", groupKey) log.Errorf("Not support searching group, group key: %s", groupKey)
return nil, libErrors.NotFoundError(ErrNotSupported).WithMessage("%s not found", groupKey) return nil, libErrors.NotFoundError(ErrNotSupported).WithMessage("%s not found", groupKey)
} }

View File

@ -102,13 +102,13 @@ func (a *Auth) Authenticate(ctx context.Context, m models.AuthModel) (*models.Us
return user, nil return user, nil
} else if resp.StatusCode == http.StatusUnauthorized { } else if resp.StatusCode == http.StatusUnauthorized {
return nil, auth.NewErrAuth(string(data)) return nil, auth.NewErrAuth(string(data))
} else {
data, err := io.ReadAll(resp.Body)
if err != nil {
log.Warningf("Failed to read response body, error: %v", err)
}
return nil, fmt.Errorf("failed to authenticate, status code: %d, text: %s", resp.StatusCode, string(data))
} }
// else
data, err = io.ReadAll(resp.Body)
if err != nil {
log.Warningf("Failed to read response body, error: %v", err)
}
return nil, fmt.Errorf("failed to authenticate, status code: %d, text: %s", resp.StatusCode, string(data))
} }
func (a *Auth) tokenReview(ctx context.Context, sessionID string) (*models.User, error) { func (a *Auth) tokenReview(ctx context.Context, sessionID string) (*models.User, error) {
@ -191,7 +191,7 @@ func (a *Auth) SearchGroup(ctx context.Context, groupKey string) (*model.UserGro
} }
// OnBoardGroup create user group entity in Harbor DB, altGroupName is not used. // OnBoardGroup create user group entity in Harbor DB, altGroupName is not used.
func (a *Auth) OnBoardGroup(ctx context.Context, u *model.UserGroup, altGroupName string) error { func (a *Auth) OnBoardGroup(ctx context.Context, u *model.UserGroup, _ string) error {
// if group name provided, on board the user group // if group name provided, on board the user group
if len(u.GroupName) == 0 { if len(u.GroupName) == 0 {
return errors.New("should provide a group name") return errors.New("should provide a group name")

View File

@ -58,9 +58,9 @@ func (ah *authHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
_, err := rw.Write([]byte(fmt.Sprintf(`{"session_id": "%s"}`, e.sessionID))) _, err := rw.Write([]byte(fmt.Sprintf(`{"session_id": "%s"}`, e.sessionID)))
if err != nil { if err != nil {
panic(err) panic(err)
} else {
return
} }
// else
return
} }
} }
http.Error(rw, fmt.Sprintf("Do not find entry in entrylist, username: %s", html.EscapeString(u)), http.StatusUnauthorized) http.Error(rw, fmt.Sprintf("Do not find entry in entrylist, username: %s", html.EscapeString(u)), http.StatusUnauthorized)
@ -89,9 +89,9 @@ func (rth *reviewTokenHandler) ServeHTTP(rw http.ResponseWriter, req *http.Reque
_, err := rw.Write([]byte(fmt.Sprintf(reviewStatusTpl, e.username))) _, err := rw.Write([]byte(fmt.Sprintf(reviewStatusTpl, e.username)))
if err != nil { if err != nil {
panic(err) panic(err)
} else {
return
} }
// else return
return
} }
} }
http.Error(rw, html.EscapeString(fmt.Sprintf("failed to match token: %s, entrylist: %+v", reviewData.Spec.Token, rth.entries)), http.StatusUnauthorized) http.Error(rw, html.EscapeString(fmt.Sprintf("failed to match token: %s, entrylist: %+v", reviewData.Spec.Token, rth.entries)), http.StatusUnauthorized)

View File

@ -54,7 +54,7 @@ func (d *Auth) SearchUser(ctx context.Context, username string) (*models.User, e
} }
// OnBoardUser - // OnBoardUser -
func (d *Auth) OnBoardUser(ctx context.Context, u *models.User) error { func (d *Auth) OnBoardUser(_ context.Context, _ *models.User) error {
return nil return nil
} }

View File

@ -30,7 +30,7 @@ type Auth struct {
} }
// SearchGroup is skipped in OIDC mode, so it makes sure any group will be onboarded. // SearchGroup is skipped in OIDC mode, so it makes sure any group will be onboarded.
func (a *Auth) SearchGroup(ctx context.Context, groupKey string) (*model.UserGroup, error) { func (a *Auth) SearchGroup(_ context.Context, groupKey string) (*model.UserGroup, error) {
return &model.UserGroup{ return &model.UserGroup{
GroupName: groupKey, GroupName: groupKey,
GroupType: common.OIDCGroupType, GroupType: common.OIDCGroupType,
@ -38,7 +38,7 @@ func (a *Auth) SearchGroup(ctx context.Context, groupKey string) (*model.UserGro
} }
// OnBoardGroup create user group entity in Harbor DB, altGroupName is not used. // OnBoardGroup create user group entity in Harbor DB, altGroupName is not used.
func (a *Auth) OnBoardGroup(ctx context.Context, u *model.UserGroup, altGroupName string) error { func (a *Auth) OnBoardGroup(ctx context.Context, u *model.UserGroup, _ string) error {
// if group name provided, on board the user group // if group name provided, on board the user group
if len(u.GroupName) == 0 || u.GroupType != common.OIDCGroupType { if len(u.GroupName) == 0 || u.GroupType != common.OIDCGroupType {
return fmt.Errorf("invalid input group for OIDC mode: %v", *u) return fmt.Errorf("invalid input group for OIDC mode: %v", *u)

View File

@ -63,7 +63,13 @@ func (oc *OIDCController) RedirectLogin() {
oc.SendInternalServerError(err) oc.SendInternalServerError(err)
return return
} }
if err := oc.SetSession(redirectURLKey, oc.Ctx.Request.URL.Query().Get("redirect_url")); err != nil { redirectURL := oc.Ctx.Request.URL.Query().Get("redirect_url")
if !utils.IsLocalPath(redirectURL) {
log.Errorf("invalid redirect url: %v", redirectURL)
oc.SendBadRequestError(fmt.Errorf("cannot redirect to other site"))
return
}
if err := oc.SetSession(redirectURLKey, redirectURL); err != nil {
log.Errorf("failed to set session for key: %s, error: %v", redirectURLKey, err) log.Errorf("failed to set session for key: %s, error: %v", redirectURLKey, err)
oc.SendInternalServerError(err) oc.SendInternalServerError(err)
return return
@ -247,8 +253,8 @@ func (oc *OIDCController) Onboard() {
oc.SendBadRequestError(errors.New("username with illegal length")) oc.SendBadRequestError(errors.New("username with illegal length"))
return return
} }
if utils.IsContainIllegalChar(username, []string{",", "~", "#", "$", "%"}) { if strings.ContainsAny(username, common.IllegalCharsInUsername) {
oc.SendBadRequestError(errors.New("username contains illegal characters")) oc.SendBadRequestError(errors.Errorf("username %v contains illegal characters: %v", username, common.IllegalCharsInUsername))
return return
} }

View File

@ -127,8 +127,6 @@ func main() {
web.BConfig.WebConfig.Session.SessionOn = true web.BConfig.WebConfig.Session.SessionOn = true
web.BConfig.WebConfig.Session.SessionName = config.SessionCookieName web.BConfig.WebConfig.Session.SessionName = config.SessionCookieName
web.BConfig.MaxMemory = 1 << 35 // (32GB)
web.BConfig.MaxUploadSize = 1 << 35 // (32GB)
// the core db used for beego session // the core db used for beego session
redisCoreURL := os.Getenv("_REDIS_URL_CORE") redisCoreURL := os.Getenv("_REDIS_URL_CORE")
if len(redisCoreURL) > 0 { if len(redisCoreURL) > 0 {
@ -163,6 +161,12 @@ func main() {
log.Info("initializing configurations...") log.Info("initializing configurations...")
config.Init() config.Init()
log.Info("configurations initialization completed") log.Info("configurations initialization completed")
// default beego max memory and max upload size is 128GB, consider from some AI related image would be large,
// also support customize it from the environment variables if the default value cannot satisfy some scenarios.
web.BConfig.MaxMemory = config.GetBeegoMaxMemoryBytes()
web.BConfig.MaxUploadSize = config.GetBeegoMaxUploadSizeBytes()
metricCfg := config.Metric() metricCfg := config.Metric()
if metricCfg.Enabled { if metricCfg.Enabled {
metric.RegisterCollectors() metric.RegisterCollectors()

View File

@ -39,8 +39,9 @@ import (
) )
var ( var (
match = regexp.MustCompile match = regexp.MustCompile
numericRegexp = match(`[0-9]+`) numericRegexp = match(`[0-9]+`)
serviceTokenRegexp = match(`^/service/token`)
// The ping endpoint will be blocked when DB conns reach the max open conns of the sql.DB // The ping endpoint will be blocked when DB conns reach the max open conns of the sql.DB
// which will make ping request timeout, so skip the middlewares which will require DB conn. // which will make ping request timeout, so skip the middlewares which will require DB conn.
@ -54,6 +55,7 @@ var (
dbTxSkippers = []middleware.Skipper{ dbTxSkippers = []middleware.Skipper{
middleware.MethodAndPathSkipper(http.MethodPatch, distribution.BlobUploadURLRegexp), middleware.MethodAndPathSkipper(http.MethodPatch, distribution.BlobUploadURLRegexp),
middleware.MethodAndPathSkipper(http.MethodPut, distribution.BlobUploadURLRegexp), middleware.MethodAndPathSkipper(http.MethodPut, distribution.BlobUploadURLRegexp),
middleware.MethodAndPathSkipper(http.MethodPost, match("^/service/token")),
func(r *http.Request) bool { // skip tx for GET, HEAD and Options requests func(r *http.Request) bool { // skip tx for GET, HEAD and Options requests
m := r.Method m := r.Method
return m == http.MethodGet || m == http.MethodHead || m == http.MethodOptions return m == http.MethodGet || m == http.MethodHead || m == http.MethodOptions
@ -72,6 +74,10 @@ var (
middleware.MethodAndPathSkipper(http.MethodPost, match("^/service/notifications/jobs/replication/task/"+numericRegexp.String())), middleware.MethodAndPathSkipper(http.MethodPost, match("^/service/notifications/jobs/replication/task/"+numericRegexp.String())),
middleware.MethodAndPathSkipper(http.MethodPost, match("^/service/notifications/jobs/retention/task/"+numericRegexp.String())), middleware.MethodAndPathSkipper(http.MethodPost, match("^/service/notifications/jobs/retention/task/"+numericRegexp.String())),
middleware.MethodAndPathSkipper(http.MethodPost, match("^/service/notifications/jobs/schedules/"+numericRegexp.String())), middleware.MethodAndPathSkipper(http.MethodPost, match("^/service/notifications/jobs/schedules/"+numericRegexp.String())),
// Harbor doesn't handle the POST request to /service/token. beego framework return 405 for the POST request
// some client, such as containerd, may send the POST request to /service/token and depends on 405/404/401/400 return code to determine continue or not
// the read only middleware returns 403 before the beego framework, so skip this request to make the client continue
middleware.MethodAndPathSkipper(http.MethodPost, serviceTokenRegexp),
pingSkipper, pingSkipper,
} }
) )

View File

@ -22,7 +22,7 @@ import (
"github.com/docker/distribution/registry/auth/token" "github.com/docker/distribution/registry/auth/token"
"github.com/docker/libtrust" "github.com/docker/libtrust"
"github.com/golang-jwt/jwt/v4" "github.com/golang-jwt/jwt/v5"
"github.com/goharbor/harbor/src/common/models" "github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/security" "github.com/goharbor/harbor/src/common/security"

View File

@ -124,7 +124,7 @@ type accessFilter interface {
type registryFilter struct { type registryFilter struct {
} }
func (reg registryFilter) filter(ctx context.Context, ctl project.Controller, func (reg registryFilter) filter(ctx context.Context, _ project.Controller,
a *token.ResourceActions) error { a *token.ResourceActions) error {
// Do not filter if the request is to access registry catalog // Do not filter if the request is to access registry catalog
if a.Name != "catalog" { if a.Name != "catalog" {

View File

@ -27,7 +27,7 @@ import (
"testing" "testing"
"github.com/docker/distribution/registry/auth/token" "github.com/docker/distribution/registry/auth/token"
jwt "github.com/golang-jwt/jwt/v4" jwt "github.com/golang-jwt/jwt/v5"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/goharbor/harbor/src/common/rbac" "github.com/goharbor/harbor/src/common/rbac"

Some files were not shown because too many files have changed in this diff Show More