mirror of
https://github.com/goharbor/harbor.git
synced 2024-11-22 02:05:41 +01:00
Merge remote-tracking branch 'upstream/master' into 190311_sync
This commit is contained in:
commit
772367498f
5
.github/stale.yml
vendored
5
.github/stale.yml
vendored
@ -7,6 +7,11 @@ exemptLabels:
|
||||
- backlog
|
||||
- kind/debt
|
||||
- need-triage
|
||||
- kind/requirement
|
||||
- king/bug
|
||||
- Epic
|
||||
- target/1.7.5
|
||||
- target/1.8.0
|
||||
# Label to use when marking an issue as stale
|
||||
staleLabel: staled
|
||||
# Comment to post when marking an issue as stale. Set to `false` to disable
|
||||
|
1
.gitignore
vendored
1
.gitignore
vendored
@ -40,3 +40,4 @@ src/portal/src/**/*.js.map
|
||||
**/aot
|
||||
**/dist
|
||||
**/.bin
|
||||
src/core/conf/app.conf
|
||||
|
52
Makefile
52
Makefile
@ -4,13 +4,13 @@
|
||||
#
|
||||
# all: prepare env, compile binaries, build images and install images
|
||||
# prepare: prepare env
|
||||
# compile: compile adminserver, ui and jobservice code
|
||||
# compile: compile core and jobservice code
|
||||
#
|
||||
# compile_golangimage:
|
||||
# compile from golang image
|
||||
# for example: make compile_golangimage -e GOBUILDIMAGE= \
|
||||
# golang:1.11.2
|
||||
# compile_adminserver, compile_core, compile_jobservice: compile specific binary
|
||||
# compile_core, compile_jobservice: compile specific binary
|
||||
#
|
||||
# build: build Harbor docker images from photon baseimage
|
||||
#
|
||||
@ -43,7 +43,7 @@
|
||||
#
|
||||
# clean: remove binary, Harbor images, specific version docker-compose \
|
||||
# file, specific version tag and online/offline install package
|
||||
# cleanbinary: remove adminserver, ui and jobservice binary
|
||||
# cleanbinary: remove core and jobservice binary
|
||||
# cleanimage: remove Harbor images
|
||||
# cleandockercomposefile:
|
||||
# remove specific version docker-compose
|
||||
@ -102,6 +102,7 @@ CLAIRVERSION=v2.0.7
|
||||
CLAIRDBVERSION=$(VERSIONTAG)
|
||||
MIGRATORVERSION=$(VERSIONTAG)
|
||||
REDISVERSION=$(VERSIONTAG)
|
||||
NOTARYMIGRATEVERSION=v3.5.4
|
||||
|
||||
# version of chartmuseum
|
||||
CHARTMUSEUMVERSION=v0.8.1
|
||||
@ -128,25 +129,24 @@ GOBUILDIMAGE=golang:1.11.2
|
||||
GOBUILDPATH=$(GOBASEPATH)/harbor
|
||||
GOIMAGEBUILDCMD=/usr/local/go/bin/go
|
||||
GOIMAGEBUILD=$(GOIMAGEBUILDCMD) build
|
||||
GOBUILDPATH_ADMINSERVER=$(GOBUILDPATH)/src/adminserver
|
||||
GOBUILDPATH_CORE=$(GOBUILDPATH)/src/core
|
||||
GOBUILDPATH_JOBSERVICE=$(GOBUILDPATH)/src/jobservice
|
||||
GOBUILDPATH_REGISTRYCTL=$(GOBUILDPATH)/src/registryctl
|
||||
GOBUILDPATH_MIGRATEPATCH=$(GOBUILDPATH)/src/cmd/migrate-patch
|
||||
GOBUILDMAKEPATH=$(GOBUILDPATH)/make
|
||||
GOBUILDMAKEPATH_ADMINSERVER=$(GOBUILDMAKEPATH)/photon/adminserver
|
||||
GOBUILDMAKEPATH_CORE=$(GOBUILDMAKEPATH)/photon/core
|
||||
GOBUILDMAKEPATH_JOBSERVICE=$(GOBUILDMAKEPATH)/photon/jobservice
|
||||
GOBUILDMAKEPATH_REGISTRYCTL=$(GOBUILDMAKEPATH)/photon/registryctl
|
||||
GOBUILDMAKEPATH_NOTARY=$(GOBUILDMAKEPATH)/photon/notary
|
||||
|
||||
# binary
|
||||
ADMINSERVERBINARYPATH=$(MAKEDEVPATH)/adminserver
|
||||
ADMINSERVERBINARYNAME=harbor_adminserver
|
||||
CORE_BINARYPATH=$(MAKEDEVPATH)/core
|
||||
CORE_BINARYNAME=harbor_core
|
||||
JOBSERVICEBINARYPATH=$(MAKEDEVPATH)/jobservice
|
||||
JOBSERVICEBINARYNAME=harbor_jobservice
|
||||
REGISTRYCTLBINARYPATH=$(MAKEDEVPATH)/registryctl
|
||||
REGISTRYCTLBINARYNAME=harbor_registryctl
|
||||
MIGRATEPATCHBINARYNAME=migrate-patch
|
||||
|
||||
# configfile
|
||||
CONFIGPATH=$(MAKEPATH)
|
||||
@ -174,7 +174,6 @@ MAKEFILEPATH_PHOTON=$(MAKEPATH)/photon
|
||||
DOCKERFILEPATH_COMMON=$(MAKEPATH)/common
|
||||
|
||||
# docker image name
|
||||
DOCKERIMAGENAME_ADMINSERVER=goharbor/harbor-adminserver
|
||||
DOCKERIMAGENAME_PORTAL=goharbor/harbor-portal
|
||||
DOCKERIMAGENAME_CORE=goharbor/harbor-core
|
||||
DOCKERIMAGENAME_JOBSERVICE=goharbor/harbor-jobservice
|
||||
@ -209,8 +208,7 @@ REGISTRYUSER=user
|
||||
REGISTRYPASSWORD=default
|
||||
|
||||
# cmds
|
||||
DOCKERSAVE_PARA=$(DOCKERIMAGENAME_ADMINSERVER):$(VERSIONTAG) \
|
||||
$(DOCKERIMAGENAME_PORTAL):$(VERSIONTAG) \
|
||||
DOCKERSAVE_PARA= $(DOCKERIMAGENAME_PORTAL):$(VERSIONTAG) \
|
||||
$(DOCKERIMAGENAME_CORE):$(VERSIONTAG) \
|
||||
$(DOCKERIMAGENAME_LOG):$(VERSIONTAG) \
|
||||
$(DOCKERIMAGENAME_DB):$(VERSIONTAG) \
|
||||
@ -223,15 +221,13 @@ PACKAGE_OFFLINE_PARA=-zcvf harbor-offline-installer-$(PKGVERSIONTAG).tgz \
|
||||
$(HARBORPKG)/common/templates $(HARBORPKG)/$(DOCKERIMGFILE).$(VERSIONTAG).tar.gz \
|
||||
$(HARBORPKG)/prepare \
|
||||
$(HARBORPKG)/LICENSE $(HARBORPKG)/install.sh \
|
||||
$(HARBORPKG)/harbor.cfg $(HARBORPKG)/$(DOCKERCOMPOSEFILENAME) \
|
||||
$(HARBORPKG)/open_source_license
|
||||
|
||||
$(HARBORPKG)/harbor.cfg $(HARBORPKG)/$(DOCKERCOMPOSEFILENAME)
|
||||
|
||||
PACKAGE_ONLINE_PARA=-zcvf harbor-online-installer-$(PKGVERSIONTAG).tgz \
|
||||
$(HARBORPKG)/common/templates $(HARBORPKG)/prepare \
|
||||
$(HARBORPKG)/LICENSE \
|
||||
$(HARBORPKG)/install.sh $(HARBORPKG)/$(DOCKERCOMPOSEFILENAME) \
|
||||
$(HARBORPKG)/harbor.cfg \
|
||||
$(HARBORPKG)/open_source_license
|
||||
$(HARBORPKG)/harbor.cfg
|
||||
|
||||
DOCKERCOMPOSE_LIST=-f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
|
||||
|
||||
@ -264,13 +260,6 @@ ui_version:
|
||||
check_environment:
|
||||
@$(MAKEPATH)/$(CHECKENVCMD)
|
||||
|
||||
compile_adminserver:
|
||||
@echo "compiling binary for adminserver (golang image)..."
|
||||
@echo $(GOBASEPATH)
|
||||
@echo $(GOBUILDPATH)
|
||||
$(DOCKERCMD) run --rm -v $(BUILDPATH):$(GOBUILDPATH) -w $(GOBUILDPATH_ADMINSERVER) $(GOBUILDIMAGE) $(GOIMAGEBUILD) -o $(GOBUILDMAKEPATH_ADMINSERVER)/$(ADMINSERVERBINARYNAME)
|
||||
@echo "Done."
|
||||
|
||||
compile_core:
|
||||
@echo "compiling binary for core (golang image)..."
|
||||
@echo $(GOBASEPATH)
|
||||
@ -288,7 +277,12 @@ compile_registryctl:
|
||||
@$(DOCKERCMD) run --rm -v $(BUILDPATH):$(GOBUILDPATH) -w $(GOBUILDPATH_REGISTRYCTL) $(GOBUILDIMAGE) $(GOIMAGEBUILD) -o $(GOBUILDMAKEPATH_REGISTRYCTL)/$(REGISTRYCTLBINARYNAME)
|
||||
@echo "Done."
|
||||
|
||||
compile:check_environment compile_adminserver compile_core compile_jobservice compile_registryctl
|
||||
compile_notary_migrate_patch:
|
||||
@echo "compiling binary for migrate patch (golang image)..."
|
||||
@$(DOCKERCMD) run --rm -v $(BUILDPATH):$(GOBUILDPATH) -w $(GOBUILDPATH_MIGRATEPATCH) $(GOBUILDIMAGE) $(GOIMAGEBUILD) -o $(GOBUILDMAKEPATH_NOTARY)/$(MIGRATEPATCHBINARYNAME)
|
||||
@echo "Done."
|
||||
|
||||
compile:check_environment compile_core compile_jobservice compile_registryctl compile_notary_migrate_patch
|
||||
|
||||
prepare:
|
||||
@echo "preparing..."
|
||||
@ -296,7 +290,7 @@ prepare:
|
||||
|
||||
build:
|
||||
make -f $(MAKEFILEPATH_PHOTON)/Makefile build -e DEVFLAG=$(DEVFLAG) \
|
||||
-e REGISTRYVERSION=$(REGISTRYVERSION) -e NGINXVERSION=$(NGINXVERSION) -e NOTARYVERSION=$(NOTARYVERSION) \
|
||||
-e REGISTRYVERSION=$(REGISTRYVERSION) -e NGINXVERSION=$(NGINXVERSION) -e NOTARYVERSION=$(NOTARYVERSION) -e NOTARYMIGRATEVERSION=$(NOTARYMIGRATEVERSION) \
|
||||
-e CLAIRVERSION=$(CLAIRVERSION) -e CLAIRDBVERSION=$(CLAIRDBVERSION) -e VERSIONTAG=$(VERSIONTAG) \
|
||||
-e BUILDBIN=$(BUILDBIN) -e REDISVERSION=$(REDISVERSION) -e MIGRATORVERSION=$(MIGRATORVERSION) \
|
||||
-e CHARTMUSEUMVERSION=$(CHARTMUSEUMVERSION) -e DOCKERIMAGENAME_CHART_SERVER=$(DOCKERIMAGENAME_CHART_SERVER)
|
||||
@ -320,7 +314,6 @@ modify_composefile_clair:
|
||||
@cp $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSECLAIRTPLFILENAME) $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSECLAIRFILENAME)
|
||||
@$(SEDCMD) -i -e 's/__postgresql_version__/$(CLAIRDBVERSION)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSECLAIRFILENAME)
|
||||
@$(SEDCMD) -i -e 's/__clair_version__/$(CLAIRVERSION)-$(VERSIONTAG)/g' $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSECLAIRFILENAME)
|
||||
|
||||
modify_composefile_chartmuseum:
|
||||
@echo "preparing docker-compose chartmuseum file..."
|
||||
@cp $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSECHARTMUSEUMTPLFILENAME) $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSECHARTMUSEUMFILENAME)
|
||||
@ -344,7 +337,6 @@ package_online: modify_composefile
|
||||
$(HARBORPKG)/docker-compose.yml ; \
|
||||
fi
|
||||
@cp LICENSE $(HARBORPKG)/LICENSE
|
||||
@cp open_source_license $(HARBORPKG)/open_source_license
|
||||
|
||||
@$(TARCMD) $(PACKAGE_ONLINE_PARA)
|
||||
@rm -rf $(HARBORPKG)
|
||||
@ -354,7 +346,6 @@ package_offline: compile ui_version build modify_sourcefiles modify_composefile
|
||||
@echo "packing offline package ..."
|
||||
@cp -r make $(HARBORPKG)
|
||||
@cp LICENSE $(HARBORPKG)/LICENSE
|
||||
@cp open_source_license $(HARBORPKG)/open_source_license
|
||||
|
||||
@echo "saving harbor docker image"
|
||||
@$(DOCKERSAVE) $(DOCKERSAVE_PARA) > $(HARBORPKG)/$(DOCKERIMGFILE).$(VERSIONTAG).tar
|
||||
@ -409,11 +400,6 @@ govet:
|
||||
|
||||
pushimage:
|
||||
@echo "pushing harbor images ..."
|
||||
@$(DOCKERTAG) $(DOCKERIMAGENAME_ADMINSERVER):$(VERSIONTAG) $(REGISTRYSERVER)$(DOCKERIMAGENAME_ADMINSERVER):$(VERSIONTAG)
|
||||
@$(PUSHSCRIPTPATH)/$(PUSHSCRIPTNAME) $(REGISTRYSERVER)$(DOCKERIMAGENAME_ADMINSERVER):$(VERSIONTAG) \
|
||||
$(REGISTRYUSER) $(REGISTRYPASSWORD) $(REGISTRYSERVER)
|
||||
@$(DOCKERRMIMAGE) $(REGISTRYSERVER)$(DOCKERIMAGENAME_ADMINSERVER):$(VERSIONTAG)
|
||||
|
||||
@$(DOCKERTAG) $(DOCKERIMAGENAME_PORTAL):$(VERSIONTAG) $(REGISTRYSERVER)$(DOCKERIMAGENAME_PORTAL):$(VERSIONTAG)
|
||||
@$(PUSHSCRIPTPATH)/$(PUSHSCRIPTNAME) $(REGISTRYSERVER)$(DOCKERIMAGENAME_PORTAL):$(VERSIONTAG) \
|
||||
$(REGISTRYUSER) $(REGISTRYPASSWORD) $(REGISTRYSERVER)
|
||||
@ -466,13 +452,11 @@ swagger_client:
|
||||
|
||||
cleanbinary:
|
||||
@echo "cleaning binary..."
|
||||
@if [ -f $(ADMINSERVERBINARYPATH)/$(ADMINSERVERBINARYNAME) ] ; then rm $(ADMINSERVERBINARYPATH)/$(ADMINSERVERBINARYNAME) ; fi
|
||||
@if [ -f $(CORE_BINARYPATH)/$(CORE_BINARYNAME) ] ; then rm $(CORE_BINARYPATH)/$(CORE_BINARYNAME) ; fi
|
||||
@if [ -f $(JOBSERVICEBINARYPATH)/$(JOBSERVICEBINARYNAME) ] ; then rm $(JOBSERVICEBINARYPATH)/$(JOBSERVICEBINARYNAME) ; fi
|
||||
|
||||
cleanimage:
|
||||
@echo "cleaning image for photon..."
|
||||
- $(DOCKERRMIMAGE) -f $(DOCKERIMAGENAME_ADMINSERVER):$(VERSIONTAG)
|
||||
- $(DOCKERRMIMAGE) -f $(DOCKERIMAGENAME_CORE):$(VERSIONTAG)
|
||||
- $(DOCKERRMIMAGE) -f $(DOCKERIMAGENAME_DB):$(VERSIONTAG)
|
||||
- $(DOCKERRMIMAGE) -f $(DOCKERIMAGENAME_JOBSERVICE):$(VERSIONTAG)
|
||||
|
@ -15,11 +15,11 @@ describes governance guidelines and maintainer responsibilities.
|
||||
| Qian Deng | [ninjadq](https://github.com/ninjadq) | [VMware](https://www.github.com/vmware/) |
|
||||
| Mia Zhou | [zhoumeina](https://github.com/zhoumeina) | [VMware](https://www.github.com/vmware/) |
|
||||
| Steven Zou | [steven-zou](https://github.com/steven-zou) | [VMware](https://www.github.com/vmware/) |
|
||||
| James Zabala | [clouderati](https://github.com/clouderati) | [VMware](https://www.github.com/vmware/) |
|
||||
|
||||
|
||||
# Maintainers
|
||||
| Maintainer | GitHub ID | Affiliation |
|
||||
| ---------- | --------- | ----------- |
|
||||
| Nathan Lowe | [nlowe](https://github.com/nlowe) | [Hyland Software](https://github.com/HylandSoftware) |
|
||||
| De Chen | [cd1989](https://github.com/cd1989) | [Caicloud](https://github.com/caicloud) |
|
||||
| Mingming Pei | [mmpei](https://github.com/mmpei) | [Netease](https://github.com/netease) |
|
||||
| Fanjian Kong | [kofj](https://github.com/kofj) | [Qihoo360](https://github.com/Qihoo360) |
|
||||
|
@ -53,9 +53,9 @@ Refer to **[User Guide](docs/user_guide.md)** for more details on how to use Har
|
||||
## Community
|
||||
|
||||
* **Twitter:** [@project_harbor](https://twitter.com/project_harbor)
|
||||
* **User Group:** Join Harbor user email group: [harbor-users@googlegroups.com](https://groups.google.com/forum/#!forum/harbor-users) to get update of Harbor's news, features, releases, or to provide suggestion and feedback. To subscribe, send an email to [harbor-users+subscribe@googlegroups.com](mailto:harbor-users+subscribe@googlegroups.com) .
|
||||
* **Developer Group:** Join Harbor developer group: [harbor-dev@googlegroups.com](https://groups.google.com/forum/#!forum/harbor-dev) for discussion on Harbor development and contribution. To subscribe, send an email to [harbor-dev+subscribe@googlegroups.com](mailto:harbor-dev+subscribe@googlegroups.com).
|
||||
* **Slack:** Join Harbor's community for discussion and ask questions: [Cloud Native Computing Foundation](https://slack.cncf.io/), channel: #harbor and #harbor-dev
|
||||
* **User Group:** Join Harbor user email group: [harbor-users@lists.cncf.io](https://lists.cncf.io/g/harbor-users) to get update of Harbor's news, features, releases, or to provide suggestion and feedback.
|
||||
* **Developer Group:** Join Harbor developer group: [harbor-dev@lists.cncf.io](https://lists.cncf.io/g/harbor-dev) for discussion on Harbor development and contribution.
|
||||
* **Slack:** Join Harbor's community for discussion and ask questions: [Cloud Native Computing Foundation](https://slack.cncf.io/), channel: [#harbor](https://cloud-native.slack.com/messages/harbor/) and [#harbor-dev](https://cloud-native.slack.com/messages/harbor-dev/)
|
||||
|
||||
## Additional Tools
|
||||
|
||||
|
@ -210,7 +210,6 @@ Stopping registry ... done
|
||||
Stopping redis ... done
|
||||
Stopping registryctl ... done
|
||||
Stopping harbor-db ... done
|
||||
Stopping harbor-adminserver ... done
|
||||
Stopping harbor-log ... done
|
||||
```
|
||||
Restarting Harbor after stopping:
|
||||
@ -220,7 +219,6 @@ Starting log ... done
|
||||
Starting registry ... done
|
||||
Starting registryctl ... done
|
||||
Starting postgresql ... done
|
||||
Starting adminserver ... done
|
||||
Starting core ... done
|
||||
Starting portal ... done
|
||||
Starting redis ... done
|
||||
@ -390,7 +388,6 @@ By default, Harbor limits the CPU usage of Clair container to 150000 and avoids
|
||||
$ sudo docker-compose ps
|
||||
Name Command State Ports
|
||||
-----------------------------------------------------------------------------------------------------------------------------
|
||||
harbor-adminserver /harbor/start.sh Up
|
||||
harbor-core /harbor/start.sh Up
|
||||
harbor-db /entrypoint.sh postgres Up 5432/tcp
|
||||
harbor-jobservice /harbor/start.sh Up
|
||||
|
@ -47,8 +47,8 @@ or above it's not necessary to call the migrator tool to migrate the schema.
|
||||
```
|
||||
docker run -it --rm -v ${harbor_cfg}:/harbor-migration/harbor-cfg/harbor.cfg goharbor/harbor-migrator:[tag] --cfg up
|
||||
```
|
||||
**NOTE:** The schema upgrade and data migration of Database is performed by adminserver when Harbor starts, if the migration fails,
|
||||
please check the log of adminserver to debug.
|
||||
**NOTE:** The schema upgrade and data migration of Database is performed by core when Harbor starts, if the migration fails,
|
||||
please check the log of core to debug.
|
||||
|
||||
6. Under the directory `./harbor`, run the `./install.sh` script to install the new Harbor instance. If you choose to install Harbor with components like Notary, Clair, and chartmuseum, refer to [Installation & Configuration Guide](../docs/installation_guide.md) for more information.
|
||||
|
||||
|
@ -748,6 +748,45 @@ paths:
|
||||
description: User need to log in first.
|
||||
'500':
|
||||
description: Internal errors.
|
||||
/users/search:
|
||||
get:
|
||||
summary: Search users by username, email
|
||||
description: |
|
||||
This endpoint is to search the users by username, email.
|
||||
parameters:
|
||||
- name: username
|
||||
in: query
|
||||
type: string
|
||||
required: false
|
||||
description: Username for filtering results.
|
||||
- name: email
|
||||
in: query
|
||||
type: string
|
||||
required: false
|
||||
description: Email for filtering results.
|
||||
- name: page
|
||||
in: query
|
||||
type: integer
|
||||
format: int32
|
||||
required: false
|
||||
description: 'The page nubmer, default is 1.'
|
||||
- name: page_size
|
||||
in: query
|
||||
type: integer
|
||||
format: int32
|
||||
required: false
|
||||
description: The size of per page.
|
||||
tags:
|
||||
- Products
|
||||
responses:
|
||||
'200':
|
||||
description: Search users by username, email successfully.
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/definitions/UserSearch'
|
||||
'500':
|
||||
description: Unexpected internal errors.
|
||||
'/users/{user_id}':
|
||||
get:
|
||||
summary: Get a user's profile.
|
||||
@ -3849,6 +3888,15 @@ definitions:
|
||||
type: string
|
||||
update_time:
|
||||
type: string
|
||||
UserSearch:
|
||||
type: object
|
||||
properties:
|
||||
user_id:
|
||||
type: integer
|
||||
format: int
|
||||
description: The ID of the user.
|
||||
username:
|
||||
type: string
|
||||
Password:
|
||||
type: object
|
||||
properties:
|
||||
@ -5074,15 +5122,10 @@ definitions:
|
||||
properties:
|
||||
type:
|
||||
type: string
|
||||
description: The schedule type. The valid values are daily, weekly and None. 'None' means to cancel the schedule.
|
||||
weekday:
|
||||
type: integer
|
||||
format: int8
|
||||
description: 'Optional, only used when the type is weekly. The valid values are 1-7.'
|
||||
offtime:
|
||||
type: integer
|
||||
format: int64
|
||||
description: 'The time offset with the UTC 00:00 in seconds.'
|
||||
description: The schedule type. The valid values are hourly, daily, weekly, custom and None. 'None' means to cancel the schedule.
|
||||
cron:
|
||||
type: string
|
||||
description: A cron expression, a time-based job scheduler.
|
||||
SearchResult:
|
||||
type: object
|
||||
description: The chart search result item
|
||||
@ -5137,6 +5180,9 @@ definitions:
|
||||
description:
|
||||
type: string
|
||||
description: The description of robot account
|
||||
expiresat:
|
||||
type: integer
|
||||
description: The expiration of robot account (in seconds)
|
||||
project_id:
|
||||
type: integer
|
||||
description: The project id of robot account
|
||||
|
@ -15,9 +15,8 @@ all | prepare env, compile binaries, build images and install im
|
||||
prepare | prepare env
|
||||
compile | compile ui and jobservice code
|
||||
compile_portal | compile portal code
|
||||
compile_ui | compile ui binary
|
||||
compile_core | compile core binary
|
||||
compile_jobservice | compile jobservice binary
|
||||
compile_adminserver | compile admin server binary
|
||||
build | build Harbor docker images (default: using build_photon)
|
||||
build_photon | build Harbor docker images from Photon OS base image
|
||||
install | compile binaries, build images, prepare specific version of compose file and startup Harbor instance
|
||||
|
@ -2,7 +2,6 @@ LOG_LEVEL=info
|
||||
CONFIG_PATH=/etc/core/app.conf
|
||||
CORE_SECRET=$core_secret
|
||||
JOBSERVICE_SECRET=$jobservice_secret
|
||||
ADMINSERVER_URL=$adminserver_url
|
||||
UAA_CA_ROOT=/etc/core/certificates/uaa_ca.pem
|
||||
_REDIS_URL=$redis_host:$redis_port,100,$redis_password
|
||||
SYNC_REGISTRY=false
|
||||
|
@ -27,6 +27,8 @@ auth:
|
||||
realm: $public_url/service/token
|
||||
rootcertbundle: /etc/registry/root.crt
|
||||
service: harbor-registry
|
||||
validation:
|
||||
disabled: true
|
||||
notifications:
|
||||
endpoints:
|
||||
- name: harbor
|
||||
|
@ -99,7 +99,7 @@ services:
|
||||
container_name: harbor-core
|
||||
env_file:
|
||||
- ./common/config/core/env
|
||||
- ./common/config/adminserver/env
|
||||
- ./common/config/core/config_env
|
||||
restart: always
|
||||
cap_drop:
|
||||
- ALL
|
||||
|
@ -1,12 +1,26 @@
|
||||
/*add robot account table*/
|
||||
CREATE TABLE robot (
|
||||
id SERIAL PRIMARY KEY NOT NULL,
|
||||
name varchar(255),
|
||||
description varchar(1024),
|
||||
project_id int,
|
||||
expiresat bigint,
|
||||
disabled boolean DEFAULT false NOT NULL,
|
||||
creation_time timestamp default CURRENT_TIMESTAMP,
|
||||
update_time timestamp default CURRENT_TIMESTAMP,
|
||||
CONSTRAINT unique_robot UNIQUE (name, project_id)
|
||||
);
|
||||
|
||||
CREATE TRIGGER robot_update_time_at_modtime BEFORE UPDATE ON robot FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
|
||||
CREATE TRIGGER robot_update_time_at_modtime BEFORE UPDATE ON robot FOR EACH ROW EXECUTE PROCEDURE update_update_time_at_column();
|
||||
|
||||
/*add master role*/
|
||||
INSERT INTO role (role_code, name) VALUES ('DRWS', 'master');
|
||||
|
||||
/*delete replication jobs whose policy has been marked as "deleted"*/
|
||||
DELETE FROM replication_job AS j
|
||||
USING replication_policy AS p
|
||||
WHERE j.policy_id = p.id AND p.deleted = TRUE;
|
||||
|
||||
/*delete replication policy which has been marked as "deleted"*/
|
||||
DELETE FROM replication_policy AS p
|
||||
WHERE p.deleted = TRUE;
|
@ -1 +0,0 @@
|
||||
INSERT INTO role (role_code, name) VALUES ('DRWS', 'master');
|
@ -3,7 +3,7 @@
|
||||
# Targets:
|
||||
#
|
||||
# build: build harbor photon images
|
||||
# clean: clean adminserver, ui and jobservice harbor images
|
||||
# clean: clean core and jobservice harbor images
|
||||
|
||||
# common
|
||||
SHELL := /bin/bash
|
||||
@ -21,10 +21,7 @@ DOCKERBUILD=$(DOCKERCMD) build --pull
|
||||
DOCKERRMIMAGE=$(DOCKERCMD) rmi
|
||||
DOCKERIMASES=$(DOCKERCMD) images
|
||||
|
||||
# binary
|
||||
ADMINSERVERSOURCECODE=$(SRCPATH)/adminserver
|
||||
ADMINSERVERBINARYPATH=$(MAKEDEVPATH)/adminserver
|
||||
ADMINSERVERBINARYNAME=harbor_adminserver
|
||||
# binary
|
||||
CORE_SOURCECODE=$(SRCPATH)/core
|
||||
CORE_BINARYPATH=$(MAKEDEVPATH)/core
|
||||
CORE_BINARYNAME=harbor_core
|
||||
@ -35,10 +32,6 @@ JOBSERVICEBINARYNAME=harbor_jobservice
|
||||
# photon dockerfile
|
||||
DOCKERFILEPATH=$(MAKEPATH)/photon
|
||||
|
||||
DOCKERFILEPATH_ADMINSERVER=$(DOCKERFILEPATH)/adminserver
|
||||
DOCKERFILENAME_ADMINSERVER=Dockerfile
|
||||
DOCKERIMAGENAME_ADMINSERVER=goharbor/harbor-adminserver
|
||||
|
||||
DOCKERFILEPATH_PORTAL=$(DOCKERFILEPATH)/portal
|
||||
DOCKERFILENAME_PORTAL=Dockerfile
|
||||
DOCKERIMAGENAME_PORTAL=goharbor/harbor-portal
|
||||
@ -105,11 +98,6 @@ _build_db:
|
||||
@$(DOCKERBUILD) -f $(DOCKERFILEPATH_DB)/$(DOCKERFILENAME_DB) -t $(DOCKERIMAGENAME_DB):$(VERSIONTAG) .
|
||||
@echo "Done."
|
||||
|
||||
_build_adminserver:
|
||||
@echo "building adminserver container for photon..."
|
||||
@$(DOCKERBUILD) -f $(DOCKERFILEPATH_ADMINSERVER)/$(DOCKERFILENAME_ADMINSERVER) -t $(DOCKERIMAGENAME_ADMINSERVER):$(VERSIONTAG) .
|
||||
@echo "Done."
|
||||
|
||||
_build_portal:
|
||||
@echo "building portal container for photon..."
|
||||
$(DOCKERBUILD) -f $(DOCKERFILEPATH_PORTAL)/$(DOCKERFILENAME_PORTAL) -t $(DOCKERIMAGENAME_PORTAL):$(VERSIONTAG) .
|
||||
@ -167,13 +155,11 @@ _build_notary:
|
||||
@if [ "$(NOTARYFLAG)" = "true" ] ; then \
|
||||
if [ "$(BUILDBIN)" != "true" ] ; then \
|
||||
rm -rf $(DOCKERFILEPATH_NOTARY)/binary && mkdir -p $(DOCKERFILEPATH_NOTARY)/binary && \
|
||||
$(call _get_binary, https://storage.googleapis.com/harbor-builds/bin/notary/release-$(NOTARYVERSION)/notary-signer, $(DOCKERFILEPATH_NOTARY)/binary/notary-signer) && \
|
||||
$(call _get_binary, https://storage.googleapis.com/harbor-builds/bin/notary/release-$(NOTARYVERSION)/notary-server, $(DOCKERFILEPATH_NOTARY)/binary/notary-server) ; \
|
||||
$(call _get_binary, https://storage.googleapis.com/harbor-builds/bin/notary/release-$(NOTARYVERSION)/binary-bundle.tgz, $(DOCKERFILEPATH_NOTARY)/binary-bundle.tgz); \
|
||||
cd $(DOCKERFILEPATH_NOTARY) && tar -zvxf binary-bundle.tgz && cd - ; \
|
||||
else \
|
||||
cd $(DOCKERFILEPATH_NOTARY) && $(DOCKERFILEPATH_NOTARY)/builder $(NOTARYVERSION) && cd - ; \
|
||||
cd $(DOCKERFILEPATH_NOTARY) && $(DOCKERFILEPATH_NOTARY)/builder $(NOTARYVERSION) $(NOTARYMIGRATEVERSION) && cd - ; \
|
||||
fi ; \
|
||||
$(call _get_binary, https://storage.googleapis.com/harbor-builds/bin/notary/release-$(NOTARYVERSION)/notary-migrate-postgresql.tgz, $(DOCKERFILEPATH_NOTARY)/binary/notary-migrate.tgz); \
|
||||
cd $(DOCKERFILEPATH_NOTARY)/binary && tar -zvxf notary-migrate.tgz && cd - ; \
|
||||
echo "building notary container for photon..."; \
|
||||
chmod 655 $(DOCKERFILEPATH_NOTARY)/binary/notary-signer && $(DOCKERBUILD) -f $(DOCKERFILEPATH_NOTARY)/$(DOCKERFILENAME_NOTARYSIGNER) -t $(DOCKERIMAGENAME_NOTARYSIGNER):$(NOTARYVERSION)-$(VERSIONTAG) . ; \
|
||||
chmod 655 $(DOCKERFILEPATH_NOTARY)/binary/notary-server && $(DOCKERBUILD) -f $(DOCKERFILEPATH_NOTARY)/$(DOCKERFILENAME_NOTARYSERVER) -t $(DOCKERIMAGENAME_NOTARYSERVER):$(NOTARYVERSION)-$(VERSIONTAG) . ; \
|
||||
@ -214,11 +200,10 @@ define _get_binary
|
||||
$(WGET) --timeout 30 --no-check-certificate $1 -O $2
|
||||
endef
|
||||
|
||||
build: _build_db _build_adminserver _build_portal _build_core _build_jobservice _build_log _build_nginx _build_registry _build_registryctl _build_notary _build_clair _build_redis _build_migrator _build_chart_server
|
||||
build: _build_db _build_portal _build_core _build_jobservice _build_log _build_nginx _build_registry _build_registryctl _build_notary _build_clair _build_redis _build_migrator _build_chart_server
|
||||
|
||||
cleanimage:
|
||||
@echo "cleaning image for photon..."
|
||||
- $(DOCKERRMIMAGE) -f $(DOCKERIMAGENAME_ADMINSERVER):$(VERSIONTAG)
|
||||
- $(DOCKERRMIMAGE) -f $(DOCKERIMAGENAME_PORTAL):$(VERSIONTAG)
|
||||
- $(DOCKERRMIMAGE) -f $(DOCKERIMAGENAME_CORE):$(VERSIONTAG)
|
||||
- $(DOCKERRMIMAGE) -f $(DOCKERIMAGENAME_JOBSERVICE):$(VERSIONTAG)
|
||||
|
@ -1,15 +0,0 @@
|
||||
FROM photon:2.0
|
||||
|
||||
RUN tdnf install -y sudo >> /dev/null \
|
||||
&& tdnf clean all \
|
||||
&& groupadd -r -g 10000 harbor && useradd --no-log-init -r -g 10000 -u 10000 harbor \
|
||||
&& mkdir /harbor/
|
||||
COPY ./make/photon/adminserver/harbor_adminserver ./make/photon/adminserver/start.sh /harbor/
|
||||
#As UI will be blocked until adminserver is ready, let adminserver do the initialise work for DB
|
||||
COPY ./make/migrations /harbor/migrations
|
||||
|
||||
HEALTHCHECK CMD curl --fail -s http://127.0.0.1:8080/api/ping || exit 1
|
||||
|
||||
RUN chmod u+x /harbor/harbor_adminserver /harbor/start.sh
|
||||
WORKDIR /harbor/
|
||||
ENTRYPOINT ["/harbor/start.sh"]
|
@ -1,7 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
#In the case when the config store is set to filesystem, the directory has to be writable.
|
||||
if [ -d /etc/adminserver/config ]; then
|
||||
chown -R 10000:10000 /etc/adminserver/config
|
||||
fi
|
||||
sudo -E -u \#10000 "/harbor/harbor_adminserver"
|
@ -1,8 +1,13 @@
|
||||
FROM golang:1.11.2
|
||||
|
||||
ARG NOTARY_VERSION
|
||||
ARG MIGRATE_VERSION
|
||||
RUN test -n "$NOTARY_VERSION"
|
||||
RUN test -n "$MIGRATE_VERSION"
|
||||
ENV NOTARYPKG github.com/theupdateframework/notary
|
||||
ENV MIGRATEPKG github.com/golang-migrate/migrate
|
||||
|
||||
COPY . /go/src/${NOTARYPKG}
|
||||
RUN git clone -b $NOTARY_VERSION https://github.com/theupdateframework/notary.git /go/src/${NOTARYPKG}
|
||||
WORKDIR /go/src/${NOTARYPKG}
|
||||
|
||||
RUN go install -tags pkcs11 \
|
||||
@ -10,3 +15,16 @@ RUN go install -tags pkcs11 \
|
||||
|
||||
RUN go install -tags pkcs11 \
|
||||
-ldflags "-w -X ${NOTARYPKG}/version.GitCommit=`git rev-parse --short HEAD` -X ${NOTARYPKG}/version.NotaryVersion=`cat NOTARY_VERSION`" ${NOTARYPKG}/cmd/notary-signer
|
||||
RUN cp -r /go/src/${NOTARYPKG}/migrations/ /
|
||||
|
||||
RUN git clone -b $MIGRATE_VERSION https://github.com/golang-migrate/migrate /go/src/${MIGRATEPKG}
|
||||
WORKDIR /go/src/${MIGRATEPKG}
|
||||
|
||||
RUN curl -fsSL -o /usr/local/bin/dep https://github.com/golang/dep/releases/download/v0.4.1/dep-linux-amd64 && chmod +x /usr/local/bin/dep
|
||||
RUN dep ensure -vendor-only
|
||||
|
||||
ENV DATABASES="postgres mysql redshift cassandra spanner cockroachdb clickhouse"
|
||||
ENV SOURCES="file go_bindata github aws_s3 google_cloud_storage"
|
||||
|
||||
RUN go install -tags "$DATABASES $SOURCES" -ldflags="-X main.Version=${MIGRATE_VERSION}" ${MIGRATEPKG}/cli && mv /go/bin/cli /go/bin/migrate
|
||||
|
||||
|
@ -2,43 +2,28 @@
|
||||
|
||||
set +e
|
||||
|
||||
if [ -z $1 ]; then
|
||||
error "Please set the 'version' variable"
|
||||
if [ -z $2 ]; then
|
||||
error "Please set the notary and migrate version"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
VERSION="$1"
|
||||
|
||||
echo "Building notary and golang-migrate from source, notary version: $1, golang-migrate version: $2"
|
||||
set -e
|
||||
|
||||
# the temp folder to store binary file...
|
||||
mkdir -p binary
|
||||
rm -rf binary/notary-server || true
|
||||
rm -rf binary/notary-signer || true
|
||||
rm -rf binary/* || true
|
||||
|
||||
cd `dirname $0`
|
||||
cur=$PWD
|
||||
docker build --build-arg NOTARY_VERSION=$1 --build-arg MIGRATE_VERSION=$2 -f ./binary.Dockerfile -t notary-binary .
|
||||
|
||||
# the temp folder to store notary source code...
|
||||
TEMP=`mktemp -d /$TMPDIR/notary.XXXXXX`
|
||||
git clone -b $VERSION https://github.com/theupdateframework/notary.git $TEMP
|
||||
|
||||
echo 'build the notary binary bases on the golang:1.11.2...'
|
||||
cp binary.Dockerfile $TEMP
|
||||
cd $TEMP
|
||||
docker build -f binary.Dockerfile -t notary-golang $TEMP
|
||||
cp -r $TEMP/migrations binary
|
||||
|
||||
echo 'copy the notary binary to local...'
|
||||
ID=$(docker create notary-golang)
|
||||
echo $ID
|
||||
cd $cur
|
||||
docker cp $ID:/go/bin/notary-server binary
|
||||
docker cp $ID:/go/bin/notary-signer binary
|
||||
echo 'copy the binary files to local...'
|
||||
ID=$(docker create notary-binary)
|
||||
docker cp $ID:/go/bin/notary-server binary/
|
||||
docker cp $ID:/go/bin/notary-signer binary/
|
||||
docker cp $ID:/go/bin/migrate binary/
|
||||
docker cp $ID:/migrations binary/
|
||||
|
||||
docker rm -f $ID
|
||||
docker rmi -f notary-golang
|
||||
|
||||
rm -rf $TEMP
|
||||
|
||||
docker rmi -f notary-binary
|
||||
|
||||
|
@ -1,2 +1,2 @@
|
||||
#!/bin/sh
|
||||
sudo -E -u \#10000 sh -c "/usr/bin/env /migrations/migrate.sh && /bin/notary-server -config=/etc/notary/server-config.postgres.json -logf=logfmt"
|
||||
sudo -E -u \#10000 sh -c "migrate-patch -database='${DB_URL}' && /migrations/migrate.sh && /bin/notary-server -config=/etc/notary/server-config.postgres.json -logf=logfmt"
|
||||
|
@ -5,10 +5,11 @@ RUN tdnf install -y shadow sudo \
|
||||
&& groupadd -r -g 10000 notary \
|
||||
&& useradd --no-log-init -r -g 10000 -u 10000 notary
|
||||
|
||||
COPY ./make/photon/notary/migrate-patch /bin/migrate-patch
|
||||
COPY ./make/photon/notary/binary/notary-server /bin/notary-server
|
||||
COPY ./make/photon/notary/binary/migrate /bin/migrate
|
||||
COPY ./make/photon/notary/binary/migrations/ /migrations/
|
||||
COPY ./make/photon/notary/server-start.sh /bin/server-start.sh
|
||||
RUN chmod u+x /bin/notary-server /migrations/migrate.sh /bin/migrate /bin/server-start.sh
|
||||
RUN chmod +x /bin/notary-server /migrations/migrate.sh /bin/migrate /bin/migrate-patch /bin/server-start.sh
|
||||
ENV SERVICE_NAME=notary_server
|
||||
ENTRYPOINT [ "/bin/server-start.sh" ]
|
||||
|
@ -1,2 +1,2 @@
|
||||
#!/bin/sh
|
||||
sudo -E -u \#10000 sh -c "/usr/bin/env && /migrations/migrate.sh && /bin/notary-signer -config=/etc/notary/signer-config.postgres.json -logf=logfmt"
|
||||
sudo -E -u \#10000 sh -c "migrate-patch -database='${DB_URL}' && /migrations/migrate.sh && /bin/notary-signer -config=/etc/notary/signer-config.postgres.json -logf=logfmt"
|
||||
|
@ -4,11 +4,12 @@ RUN tdnf install -y shadow sudo \
|
||||
&& tdnf clean all \
|
||||
&& groupadd -r -g 10000 notary \
|
||||
&& useradd --no-log-init -r -g 10000 -u 10000 notary
|
||||
COPY ./make/photon/notary/migrate-patch /bin/migrate-patch
|
||||
COPY ./make/photon/notary/binary/notary-signer /bin/notary-signer
|
||||
COPY ./make/photon/notary/binary/migrate /bin/migrate
|
||||
COPY ./make/photon/notary/binary/migrations/ /migrations/
|
||||
COPY ./make/photon/notary/signer-start.sh /bin/signer-start.sh
|
||||
|
||||
RUN chmod u+x /bin/notary-signer /migrations/migrate.sh /bin/migrate /bin/signer-start.sh
|
||||
RUN chmod +x /bin/notary-signer /migrations/migrate.sh /bin/migrate /bin/migrate-patch /bin/signer-start.sh
|
||||
ENV SERVICE_NAME=notary_signer
|
||||
ENTRYPOINT [ "/bin/signer-start.sh" ]
|
||||
|
14
make/prepare
14
make/prepare
@ -253,10 +253,6 @@ registry_custom_ca_bundle_path = rcp.get("configuration", "registry_custom_ca_bu
|
||||
core_secret = ''.join(random.choice(string.ascii_letters+string.digits) for i in range(16))
|
||||
jobservice_secret = ''.join(random.choice(string.ascii_letters+string.digits) for i in range(16))
|
||||
|
||||
adminserver_config_dir = os.path.join(config_dir,"adminserver")
|
||||
if not os.path.exists(adminserver_config_dir):
|
||||
os.makedirs(os.path.join(config_dir, "adminserver"))
|
||||
|
||||
core_config_dir = prep_conf_dir(config_dir,"core")
|
||||
core_certificates_dir = prep_conf_dir(core_config_dir,"certificates")
|
||||
db_config_dir = prep_conf_dir(config_dir, "db")
|
||||
@ -267,7 +263,7 @@ nginx_config_dir = prep_conf_dir (config_dir, "nginx")
|
||||
nginx_conf_d = prep_conf_dir(nginx_config_dir, "conf.d")
|
||||
log_config_dir = prep_conf_dir (config_dir, "log")
|
||||
|
||||
adminserver_conf_env = os.path.join(config_dir, "adminserver", "env")
|
||||
conf_env = os.path.join(config_dir, "core", "config_env")
|
||||
core_conf_env = os.path.join(config_dir, "core", "env")
|
||||
core_conf = os.path.join(config_dir, "core", "app.conf")
|
||||
core_cert_dir = os.path.join(config_dir, "core", "certificates")
|
||||
@ -279,8 +275,7 @@ db_conf_env = os.path.join(config_dir, "db", "env")
|
||||
job_conf_env = os.path.join(config_dir, "jobservice", "env")
|
||||
nginx_conf = os.path.join(config_dir, "nginx", "nginx.conf")
|
||||
cert_dir = os.path.join(config_dir, "nginx", "cert")
|
||||
log_rotate_config = os.path.join(config_dir, "log", "logrotate.conf")
|
||||
adminserver_url = "http://adminserver:8080"
|
||||
log_rotate_config = os.path.join(config_dir, "log", "logrotate.conf")
|
||||
registry_url = "http://registry:5000"
|
||||
registry_controller_url = "http://registryctl:8080"
|
||||
core_url = "http://core:8080"
|
||||
@ -338,8 +333,8 @@ reload_key = ''.join(random.choice(string.ascii_uppercase + string.digits) for _
|
||||
|
||||
ldap_group_admin_dn = rcp.get("configuration", "ldap_group_admin_dn") if rcp.has_option("configuration", "ldap_group_admin_dn") else ""
|
||||
|
||||
render(os.path.join(templates_dir, "adminserver", "env"),
|
||||
adminserver_conf_env,
|
||||
render(os.path.join(templates_dir, "core", "config_env"),
|
||||
conf_env,
|
||||
reload_config=reload_config,
|
||||
public_url=public_url,
|
||||
core_url=core_url,
|
||||
@ -415,7 +410,6 @@ render(os.path.join(templates_dir, "core", "env"),
|
||||
redis_host=redis_host,
|
||||
redis_port=redis_port,
|
||||
redis_password=redis_password,
|
||||
adminserver_url = adminserver_url,
|
||||
chart_cache_driver = chart_cache_driver,
|
||||
redis_url_reg = redis_url_reg)
|
||||
|
||||
|
16432
open_source_license
16432
open_source_license
File diff suppressed because it is too large
Load Diff
@ -1,48 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
func handleInternalServerError(w http.ResponseWriter) {
|
||||
http.Error(w, http.StatusText(http.StatusInternalServerError),
|
||||
http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
func handleBadRequestError(w http.ResponseWriter, error string) {
|
||||
http.Error(w, error, http.StatusBadRequest)
|
||||
}
|
||||
|
||||
func handleUnauthorized(w http.ResponseWriter) {
|
||||
http.Error(w, http.StatusText(http.StatusUnauthorized),
|
||||
http.StatusUnauthorized)
|
||||
}
|
||||
|
||||
// response status code will be written automatically if there is an error
|
||||
func writeJSON(w http.ResponseWriter, v interface{}) error {
|
||||
b, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
handleInternalServerError(w)
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err = w.Write(b); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
@ -1,78 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestHandleInternalServerError(t *testing.T) {
|
||||
w := httptest.NewRecorder()
|
||||
handleInternalServerError(w)
|
||||
|
||||
if w.Code != http.StatusInternalServerError {
|
||||
t.Errorf("unexpected status code: %d != %d", w.Code, http.StatusInternalServerError)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleBadRequestError(t *testing.T) {
|
||||
w := httptest.NewRecorder()
|
||||
err := "error message"
|
||||
handleBadRequestError(w, err)
|
||||
|
||||
if w.Code != http.StatusBadRequest {
|
||||
t.Errorf("unexpected status code: %d != %d", w.Code, http.StatusBadRequest)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleUnauthorized(t *testing.T) {
|
||||
w := httptest.NewRecorder()
|
||||
handleUnauthorized(w)
|
||||
|
||||
if w.Code != http.StatusUnauthorized {
|
||||
t.Errorf("unexpected status code: %d != %d", w.Code, http.StatusUnauthorized)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteJSONNilInterface(t *testing.T) {
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
if err := writeJSON(w, nil); err != nil {
|
||||
t.Errorf("Expected nil error, received: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteJSONMarshallErr(t *testing.T) {
|
||||
// Tests capture json.Marshall error
|
||||
x := map[string]interface{}{
|
||||
"foo": make(chan int),
|
||||
}
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
if err := writeJSON(w, x); err == nil {
|
||||
t.Errorf("Expected %v error received: no no error", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteJSON(t *testing.T) {
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
if err := writeJSON(w, "Pong"); err != nil {
|
||||
t.Errorf("Expected nil error, received: %v", err)
|
||||
}
|
||||
}
|
@ -1,75 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
|
||||
"github.com/goharbor/harbor/src/adminserver/systemcfg"
|
||||
"github.com/goharbor/harbor/src/common/utils/log"
|
||||
)
|
||||
|
||||
// ListCfgs lists configurations
|
||||
func ListCfgs(w http.ResponseWriter, r *http.Request) {
|
||||
cfg, err := systemcfg.CfgStore.Read()
|
||||
if err != nil {
|
||||
log.Errorf("failed to get system configurations: %v", err)
|
||||
handleInternalServerError(w)
|
||||
return
|
||||
}
|
||||
systemcfg.AddMissedKey(cfg)
|
||||
if err = writeJSON(w, cfg); err != nil {
|
||||
log.Errorf("failed to write response: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// UpdateCfgs updates configurations
|
||||
func UpdateCfgs(w http.ResponseWriter, r *http.Request) {
|
||||
b, err := ioutil.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
log.Errorf("failed to read request body: %v", err)
|
||||
handleInternalServerError(w)
|
||||
return
|
||||
}
|
||||
|
||||
m := map[string]interface{}{}
|
||||
if err = json.Unmarshal(b, &m); err != nil {
|
||||
handleBadRequestError(w, err.Error())
|
||||
return
|
||||
}
|
||||
if err = systemcfg.CfgStore.Write(m); err != nil {
|
||||
log.Errorf("failed to update system configurations: %v", err)
|
||||
handleInternalServerError(w)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// ResetCfgs resets configurations from environment variables
|
||||
func ResetCfgs(w http.ResponseWriter, r *http.Request) {
|
||||
cfgs := map[string]interface{}{}
|
||||
if err := systemcfg.LoadFromEnv(cfgs, true); err != nil {
|
||||
log.Errorf("failed to reset system configurations: %v", err)
|
||||
handleInternalServerError(w)
|
||||
return
|
||||
}
|
||||
if err := systemcfg.CfgStore.Write(cfgs); err != nil {
|
||||
log.Errorf("failed to write system configurations to storage: %v", err)
|
||||
handleInternalServerError(w)
|
||||
return
|
||||
}
|
||||
}
|
@ -1,168 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/goharbor/harbor/src/adminserver/systemcfg"
|
||||
"github.com/goharbor/harbor/src/common"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type fakeCfgStore struct {
|
||||
cfgs map[string]interface{}
|
||||
err error
|
||||
}
|
||||
|
||||
func (f *fakeCfgStore) Name() string {
|
||||
return "fake"
|
||||
}
|
||||
|
||||
func (f *fakeCfgStore) Read() (map[string]interface{}, error) {
|
||||
return f.cfgs, f.err
|
||||
}
|
||||
|
||||
func (f *fakeCfgStore) Write(cfgs map[string]interface{}) error {
|
||||
f.cfgs = cfgs
|
||||
return f.err
|
||||
}
|
||||
|
||||
func TestListCfgs(t *testing.T) {
|
||||
// 500
|
||||
systemcfg.CfgStore = &fakeCfgStore{
|
||||
cfgs: nil,
|
||||
err: errors.New("error"),
|
||||
}
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
ListCfgs(w, nil)
|
||||
assert.Equal(t, http.StatusInternalServerError, w.Code)
|
||||
|
||||
// 200
|
||||
key := "key"
|
||||
value := "value"
|
||||
cfgs := map[string]interface{}{
|
||||
key: value,
|
||||
}
|
||||
systemcfg.CfgStore = &fakeCfgStore{
|
||||
cfgs: cfgs,
|
||||
err: nil,
|
||||
}
|
||||
w = httptest.NewRecorder()
|
||||
ListCfgs(w, nil)
|
||||
assert.Equal(t, http.StatusOK, w.Code)
|
||||
result, err := parse(w.Body)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to parse response body: %v", err)
|
||||
}
|
||||
assert.Equal(t, value, result[key])
|
||||
}
|
||||
|
||||
func TestUpdateCfgs(t *testing.T) {
|
||||
// 400
|
||||
w := httptest.NewRecorder()
|
||||
r, err := http.NewRequest("", "", bytes.NewReader([]byte{'a'}))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create request: %v", err)
|
||||
}
|
||||
|
||||
UpdateCfgs(w, r)
|
||||
assert.Equal(t, http.StatusBadRequest, w.Code)
|
||||
|
||||
// 500
|
||||
systemcfg.CfgStore = &fakeCfgStore{
|
||||
cfgs: nil,
|
||||
err: errors.New("error"),
|
||||
}
|
||||
w = httptest.NewRecorder()
|
||||
r, err = http.NewRequest("", "", bytes.NewBufferString("{}"))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create request: %v", err)
|
||||
}
|
||||
|
||||
UpdateCfgs(w, r)
|
||||
assert.Equal(t, http.StatusInternalServerError, w.Code)
|
||||
|
||||
// 200
|
||||
key := "key"
|
||||
value := "value"
|
||||
systemcfg.CfgStore = &fakeCfgStore{
|
||||
cfgs: nil,
|
||||
err: nil,
|
||||
}
|
||||
w = httptest.NewRecorder()
|
||||
r, err = http.NewRequest("", "",
|
||||
bytes.NewBufferString(fmt.Sprintf(`{"%s":"%s"}`, key, value)))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create request: %v", err)
|
||||
}
|
||||
|
||||
UpdateCfgs(w, r)
|
||||
assert.Equal(t, http.StatusOK, w.Code)
|
||||
|
||||
}
|
||||
|
||||
func TestResetCfgs(t *testing.T) {
|
||||
// 500
|
||||
systemcfg.CfgStore = &fakeCfgStore{
|
||||
cfgs: nil,
|
||||
err: errors.New("error"),
|
||||
}
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
ResetCfgs(w, nil)
|
||||
assert.Equal(t, http.StatusInternalServerError, w.Code)
|
||||
|
||||
// 200
|
||||
os.Clearenv()
|
||||
key := "LDAP_URL"
|
||||
value := "ldap://ldap.com"
|
||||
if err := os.Setenv(key, value); err != nil {
|
||||
t.Fatalf("failed to set env: %v", err)
|
||||
}
|
||||
store := &fakeCfgStore{
|
||||
cfgs: nil,
|
||||
err: nil,
|
||||
}
|
||||
systemcfg.CfgStore = store
|
||||
w = httptest.NewRecorder()
|
||||
|
||||
ResetCfgs(w, nil)
|
||||
assert.Equal(t, http.StatusOK, w.Code)
|
||||
assert.Equal(t, value, store.cfgs[common.LDAPURL])
|
||||
}
|
||||
|
||||
func parse(reader io.Reader) (map[string]interface{}, error) {
|
||||
b, err := ioutil.ReadAll(reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m := map[string]interface{}{}
|
||||
if err := json.Unmarshal(b, &m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
@ -1,14 +0,0 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"github.com/goharbor/harbor/src/common/utils/log"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// Ping monitor the server status
|
||||
func Ping(w http.ResponseWriter, r *http.Request) {
|
||||
if err := writeJSON(w, "Pong"); err != nil {
|
||||
log.Errorf("Failed to write response: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
@ -1,17 +0,0 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestPing(t *testing.T) {
|
||||
w := httptest.NewRecorder()
|
||||
Ping(w, nil)
|
||||
assert.Equal(t, http.StatusOK, w.Code)
|
||||
result, _ := ioutil.ReadAll(w.Body)
|
||||
assert.Equal(t, "\"Pong\"", string(result))
|
||||
}
|
@ -1,54 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"github.com/goharbor/harbor/src/common/secret"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// Authenticator defines Authenticate function to authenticate requests
|
||||
type Authenticator interface {
|
||||
// Authenticate the request, if there is no error, the bool value
|
||||
// determines whether the request is authenticated or not
|
||||
Authenticate(req *http.Request) (bool, error)
|
||||
}
|
||||
|
||||
type secretAuthenticator struct {
|
||||
secrets map[string]string
|
||||
}
|
||||
|
||||
// NewSecretAuthenticator returns an instance of secretAuthenticator
|
||||
func NewSecretAuthenticator(secrets map[string]string) Authenticator {
|
||||
return &secretAuthenticator{
|
||||
secrets: secrets,
|
||||
}
|
||||
}
|
||||
|
||||
// Authenticate the request according the secret
|
||||
func (s *secretAuthenticator) Authenticate(req *http.Request) (bool, error) {
|
||||
if len(s.secrets) == 0 {
|
||||
return true, nil
|
||||
}
|
||||
reqSecret := secret.FromRequest(req)
|
||||
|
||||
for _, v := range s.secrets {
|
||||
if reqSecret == v {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
@ -1,53 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
commonsecret "github.com/goharbor/harbor/src/common/secret"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestAuthenticate(t *testing.T) {
|
||||
secret := "correct"
|
||||
req1, err := http.NewRequest("", "", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create request: %v", err)
|
||||
}
|
||||
|
||||
req2, err := http.NewRequest("", "", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create request: %v", err)
|
||||
}
|
||||
_ = commonsecret.AddToRequest(req2, secret)
|
||||
cases := []struct {
|
||||
secrets map[string]string
|
||||
req *http.Request
|
||||
result bool
|
||||
}{
|
||||
{nil, req1, true},
|
||||
{map[string]string{"secret1": "incorrect"}, req2, false},
|
||||
{map[string]string{"secret1": "incorrect", "secret2": secret}, req2, true},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
authenticator := NewSecretAuthenticator(c.secrets)
|
||||
authenticated, err := authenticator.Authenticate(c.req)
|
||||
assert.Nil(t, err, "unexpected error")
|
||||
assert.Equal(t, c.result, authenticated, "unexpected result")
|
||||
}
|
||||
}
|
@ -1,105 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package client
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/goharbor/harbor/src/common/http"
|
||||
"github.com/goharbor/harbor/src/common/http/modifier/auth"
|
||||
"github.com/goharbor/harbor/src/common/utils"
|
||||
"github.com/goharbor/harbor/src/core/systeminfo/imagestorage"
|
||||
)
|
||||
|
||||
// Client defines methods that an Adminserver client should implement
|
||||
type Client interface {
|
||||
// Ping tests the connection with server
|
||||
Ping() error
|
||||
// GetCfgs returns system configurations
|
||||
GetCfgs() (map[string]interface{}, error)
|
||||
// UpdateCfgs updates system configurations
|
||||
UpdateCfgs(map[string]interface{}) error
|
||||
// ResetCfgs resets system configuratoins form environment variables
|
||||
ResetCfgs() error
|
||||
// Capacity returns the capacity of image storage
|
||||
Capacity() (*imagestorage.Capacity, error)
|
||||
}
|
||||
|
||||
// NewClient return an instance of Adminserver client
|
||||
func NewClient(baseURL string, cfg *Config) Client {
|
||||
baseURL = strings.TrimRight(baseURL, "/")
|
||||
if !strings.Contains(baseURL, "://") {
|
||||
baseURL = "http://" + baseURL
|
||||
}
|
||||
client := &client{
|
||||
baseURL: baseURL,
|
||||
}
|
||||
if cfg != nil {
|
||||
authorizer := auth.NewSecretAuthorizer(cfg.Secret)
|
||||
client.client = http.NewClient(nil, authorizer)
|
||||
}
|
||||
return client
|
||||
}
|
||||
|
||||
type client struct {
|
||||
baseURL string
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
// Config contains configurations needed for client
|
||||
type Config struct {
|
||||
Secret string
|
||||
}
|
||||
|
||||
func (c *client) Ping() error {
|
||||
addr := strings.Split(c.baseURL, "://")[1]
|
||||
if !strings.Contains(addr, ":") {
|
||||
addr = addr + ":80"
|
||||
}
|
||||
|
||||
return utils.TestTCPConn(addr, 60, 2)
|
||||
}
|
||||
|
||||
// GetCfgs ...
|
||||
func (c *client) GetCfgs() (map[string]interface{}, error) {
|
||||
url := c.baseURL + "/api/configs"
|
||||
cfgs := map[string]interface{}{}
|
||||
if err := c.client.Get(url, &cfgs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cfgs, nil
|
||||
}
|
||||
|
||||
// UpdateCfgs ...
|
||||
func (c *client) UpdateCfgs(cfgs map[string]interface{}) error {
|
||||
url := c.baseURL + "/api/configurations"
|
||||
return c.client.Put(url, cfgs)
|
||||
}
|
||||
|
||||
// ResetCfgs ...
|
||||
func (c *client) ResetCfgs() error {
|
||||
url := c.baseURL + "/api/configurations/reset"
|
||||
return c.client.Post(url)
|
||||
}
|
||||
|
||||
// Capacity ...
|
||||
func (c *client) Capacity() (*imagestorage.Capacity, error) {
|
||||
url := c.baseURL + "/api/systeminfo/capacity"
|
||||
capacity := &imagestorage.Capacity{}
|
||||
if err := c.client.Get(url, capacity); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return capacity, nil
|
||||
}
|
@ -1,71 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package client
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/goharbor/harbor/src/common"
|
||||
"github.com/goharbor/harbor/src/common/utils/test"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
var c Client
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
|
||||
server, err := test.NewAdminserver(nil)
|
||||
if err != nil {
|
||||
fmt.Printf("failed to create adminserver: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
c = NewClient(server.URL, &Config{})
|
||||
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
func TestPing(t *testing.T) {
|
||||
err := c.Ping()
|
||||
assert.Nil(t, err, "unexpected error")
|
||||
}
|
||||
|
||||
func TestGetCfgs(t *testing.T) {
|
||||
cfgs, err := c.GetCfgs()
|
||||
if !assert.Nil(t, err, "unexpected error") {
|
||||
return
|
||||
}
|
||||
|
||||
assert.Equal(t, common.DBAuth, cfgs[common.AUTHMode], "unexpected configuration")
|
||||
}
|
||||
|
||||
func TestUpdateCfgs(t *testing.T) {
|
||||
cfgs := map[string]interface{}{
|
||||
common.AUTHMode: common.LDAPAuth,
|
||||
}
|
||||
err := c.UpdateCfgs(cfgs)
|
||||
if !assert.Nil(t, err, "unexpected error") {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func TestResetCfgs(t *testing.T) {
|
||||
err := c.ResetCfgs()
|
||||
if !assert.Nil(t, err, "unexpected error") {
|
||||
return
|
||||
}
|
||||
}
|
@ -1,88 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"github.com/goharbor/harbor/src/adminserver/auth"
|
||||
"github.com/goharbor/harbor/src/common/utils/log"
|
||||
gorilla_handlers "github.com/gorilla/handlers"
|
||||
)
|
||||
|
||||
// NewHandler returns a gorilla router wrapped by an authentication handler
|
||||
// and a logging handler
|
||||
func NewHandler() http.Handler {
|
||||
h := newRouter()
|
||||
secrets := map[string]string{
|
||||
"uiSecret": os.Getenv("CORE_SECRET"),
|
||||
"jobserviceSecret": os.Getenv("JOBSERVICE_SECRET"),
|
||||
}
|
||||
insecureAPIs := map[string]bool{
|
||||
"/api/ping": true,
|
||||
}
|
||||
h = newAuthHandler(auth.NewSecretAuthenticator(secrets), h, insecureAPIs)
|
||||
h = gorilla_handlers.LoggingHandler(os.Stdout, h)
|
||||
return h
|
||||
}
|
||||
|
||||
type authHandler struct {
|
||||
authenticator auth.Authenticator
|
||||
handler http.Handler
|
||||
insecureAPIs map[string]bool
|
||||
}
|
||||
|
||||
func newAuthHandler(authenticator auth.Authenticator, handler http.Handler, insecureAPIs map[string]bool) http.Handler {
|
||||
return &authHandler{
|
||||
authenticator: authenticator,
|
||||
handler: handler,
|
||||
insecureAPIs: insecureAPIs,
|
||||
}
|
||||
}
|
||||
|
||||
func (a *authHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
if a.authenticator == nil {
|
||||
if a.handler != nil {
|
||||
a.handler.ServeHTTP(w, r)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if a.insecureAPIs != nil && a.insecureAPIs[r.URL.Path] {
|
||||
if a.handler != nil {
|
||||
a.handler.ServeHTTP(w, r)
|
||||
}
|
||||
return
|
||||
}
|
||||
valid, err := a.authenticator.Authenticate(r)
|
||||
if err != nil {
|
||||
log.Errorf("failed to authenticate request: %v", err)
|
||||
http.Error(w, http.StatusText(http.StatusInternalServerError),
|
||||
http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
if !valid {
|
||||
http.Error(w, http.StatusText(http.StatusUnauthorized),
|
||||
http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
|
||||
if a.handler != nil {
|
||||
a.handler.ServeHTTP(w, r)
|
||||
}
|
||||
return
|
||||
}
|
@ -1,84 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/goharbor/harbor/src/adminserver/auth"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type fakeAuthenticator struct {
|
||||
authenticated bool
|
||||
err error
|
||||
}
|
||||
|
||||
func (f *fakeAuthenticator) Authenticate(req *http.Request) (bool, error) {
|
||||
return f.authenticated, f.err
|
||||
}
|
||||
|
||||
type fakeHandler struct {
|
||||
responseCode int
|
||||
}
|
||||
|
||||
func (f *fakeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(f.responseCode)
|
||||
}
|
||||
|
||||
func TestNewAuthHandler(t *testing.T) {
|
||||
cases := []struct {
|
||||
authenticator auth.Authenticator
|
||||
handler http.Handler
|
||||
insecureAPIs map[string]bool
|
||||
responseCode int
|
||||
requestURL string
|
||||
}{
|
||||
|
||||
{nil, nil, nil, http.StatusOK, "http://localhost/good"},
|
||||
{&fakeAuthenticator{
|
||||
authenticated: false,
|
||||
err: nil,
|
||||
}, nil, nil, http.StatusUnauthorized, "http://localhost/hello"},
|
||||
{&fakeAuthenticator{
|
||||
authenticated: false,
|
||||
err: errors.New("error"),
|
||||
}, nil, nil, http.StatusInternalServerError, "http://localhost/hello"},
|
||||
{&fakeAuthenticator{
|
||||
authenticated: true,
|
||||
err: nil,
|
||||
}, &fakeHandler{http.StatusNotFound}, nil, http.StatusNotFound, "http://localhost/notexsit"},
|
||||
{&fakeAuthenticator{
|
||||
authenticated: false,
|
||||
err: nil,
|
||||
}, &fakeHandler{http.StatusOK}, map[string]bool{"/api/ping": true}, http.StatusOK, "http://localhost/api/ping"},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
handler := newAuthHandler(c.authenticator, c.handler, c.insecureAPIs)
|
||||
w := httptest.NewRecorder()
|
||||
r := httptest.NewRequest("GET", c.requestURL, nil)
|
||||
handler.ServeHTTP(w, r)
|
||||
assert.Equal(t, c.responseCode, w.Code, "unexpected response code")
|
||||
}
|
||||
handler := NewHandler()
|
||||
w := httptest.NewRecorder()
|
||||
r := httptest.NewRequest("GET", "http://localhost/api/ping", nil)
|
||||
handler.ServeHTTP(w, r)
|
||||
|
||||
}
|
@ -1,31 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/goharbor/harbor/src/adminserver/api"
|
||||
"github.com/gorilla/mux"
|
||||
)
|
||||
|
||||
func newRouter() http.Handler {
|
||||
r := mux.NewRouter()
|
||||
r.HandleFunc("/api/configurations", api.UpdateCfgs).Methods("PUT")
|
||||
r.HandleFunc("/api/configs", api.ListCfgs).Methods("GET")
|
||||
r.HandleFunc("/api/configurations/reset", api.ResetCfgs).Methods("POST")
|
||||
r.HandleFunc("/api/ping", api.Ping).Methods("GET")
|
||||
return r
|
||||
}
|
@ -1,60 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"github.com/goharbor/harbor/src/adminserver/handlers"
|
||||
syscfg "github.com/goharbor/harbor/src/adminserver/systemcfg"
|
||||
"github.com/goharbor/harbor/src/common/utils/log"
|
||||
)
|
||||
|
||||
// Server for admin component
|
||||
type Server struct {
|
||||
Port string
|
||||
Handler http.Handler
|
||||
}
|
||||
|
||||
// Serve the API
|
||||
func (s *Server) Serve() error {
|
||||
server := &http.Server{
|
||||
Addr: ":" + s.Port,
|
||||
Handler: s.Handler,
|
||||
}
|
||||
|
||||
return server.ListenAndServe()
|
||||
}
|
||||
|
||||
func main() {
|
||||
log.Info("initializing system configurations...")
|
||||
if err := syscfg.Init(); err != nil {
|
||||
log.Fatalf("failed to initialize the system: %v", err)
|
||||
}
|
||||
log.Info("system initialization completed")
|
||||
|
||||
port := os.Getenv("PORT")
|
||||
if len(port) == 0 {
|
||||
port = "80"
|
||||
}
|
||||
server := &Server{
|
||||
Port: port,
|
||||
Handler: handlers.NewHandler(),
|
||||
}
|
||||
if err := server.Serve(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
@ -1,60 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package encrypt
|
||||
|
||||
import (
|
||||
comcfg "github.com/goharbor/harbor/src/common/config"
|
||||
"github.com/goharbor/harbor/src/common/utils"
|
||||
)
|
||||
|
||||
// Encryptor encrypts or decrypts a string
|
||||
type Encryptor interface {
|
||||
// Encrypt encrypts plaintext
|
||||
Encrypt(string) (string, error)
|
||||
// Decrypt decrypts ciphertext
|
||||
Decrypt(string) (string, error)
|
||||
}
|
||||
|
||||
// AESEncryptor uses AES to encrypt or decrypt string
|
||||
type AESEncryptor struct {
|
||||
keyProvider comcfg.KeyProvider
|
||||
keyParams map[string]interface{}
|
||||
}
|
||||
|
||||
// NewAESEncryptor returns an instance of an AESEncryptor
|
||||
func NewAESEncryptor(keyProvider comcfg.KeyProvider,
|
||||
keyParams map[string]interface{}) Encryptor {
|
||||
return &AESEncryptor{
|
||||
keyProvider: keyProvider,
|
||||
}
|
||||
}
|
||||
|
||||
// Encrypt ...
|
||||
func (a *AESEncryptor) Encrypt(plaintext string) (string, error) {
|
||||
key, err := a.keyProvider.Get(a.keyParams)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return utils.ReversibleEncrypt(plaintext, key)
|
||||
}
|
||||
|
||||
// Decrypt ...
|
||||
func (a *AESEncryptor) Decrypt(ciphertext string) (string, error) {
|
||||
key, err := a.keyProvider.Get(a.keyParams)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return utils.ReversibleDecrypt(ciphertext, key)
|
||||
}
|
@ -1,92 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package encrypt
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
comcfg "github.com/goharbor/harbor/src/common/config"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type fakeKeyProvider struct {
|
||||
key string
|
||||
err error
|
||||
}
|
||||
|
||||
func (f *fakeKeyProvider) Get(params map[string]interface{}) (
|
||||
string, error) {
|
||||
return f.key, f.err
|
||||
}
|
||||
|
||||
func TestEncrypt(t *testing.T) {
|
||||
cases := []struct {
|
||||
plaintext string
|
||||
keyProvider comcfg.KeyProvider
|
||||
err bool
|
||||
}{
|
||||
{"", &fakeKeyProvider{"", errors.New("error")}, true},
|
||||
{"text", &fakeKeyProvider{"1234567890123456", nil}, false},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
encrptor := NewAESEncryptor(c.keyProvider, nil)
|
||||
ciphertext, err := encrptor.Encrypt(c.plaintext)
|
||||
if c.err {
|
||||
assert.NotNil(t, err)
|
||||
} else {
|
||||
assert.Nil(t, err)
|
||||
str, err := encrptor.Decrypt(ciphertext)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, c.plaintext, str)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecrypt(t *testing.T) {
|
||||
plaintext := "text"
|
||||
key := "1234567890123456"
|
||||
|
||||
encrptor := NewAESEncryptor(&fakeKeyProvider{
|
||||
key: key,
|
||||
err: nil,
|
||||
}, nil)
|
||||
|
||||
ciphertext, err := encrptor.Encrypt(plaintext)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to encrpt %s: %v", plaintext, err)
|
||||
}
|
||||
|
||||
cases := []struct {
|
||||
ciphertext string
|
||||
keyProvider comcfg.KeyProvider
|
||||
err bool
|
||||
}{
|
||||
{"", &fakeKeyProvider{"", errors.New("error")}, true},
|
||||
{ciphertext, &fakeKeyProvider{key, nil}, false},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
encrptor := NewAESEncryptor(c.keyProvider, nil)
|
||||
str, err := encrptor.Decrypt(c.ciphertext)
|
||||
if c.err {
|
||||
assert.NotNil(t, err)
|
||||
} else {
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, plaintext, str)
|
||||
}
|
||||
}
|
||||
}
|
@ -1,151 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/goharbor/harbor/src/adminserver/systemcfg/store"
|
||||
"github.com/goharbor/harbor/src/common"
|
||||
"github.com/goharbor/harbor/src/common/dao"
|
||||
"github.com/goharbor/harbor/src/common/models"
|
||||
)
|
||||
|
||||
const (
|
||||
name = "database"
|
||||
)
|
||||
|
||||
var (
|
||||
numKeys = map[string]bool{
|
||||
common.EmailPort: true,
|
||||
common.LDAPScope: true,
|
||||
common.LDAPGroupSearchScope: true,
|
||||
common.LDAPTimeout: true,
|
||||
common.TokenExpiration: true,
|
||||
common.MaxJobWorkers: true,
|
||||
common.CfgExpiration: true,
|
||||
common.ClairDBPort: true,
|
||||
common.PostGreSQLPort: true,
|
||||
}
|
||||
boolKeys = map[string]bool{
|
||||
common.WithClair: true,
|
||||
common.WithNotary: true,
|
||||
common.SelfRegistration: true,
|
||||
common.EmailSSL: true,
|
||||
common.EmailInsecure: true,
|
||||
common.LDAPVerifyCert: true,
|
||||
common.UAAVerifyCert: true,
|
||||
common.ReadOnly: true,
|
||||
common.WithChartMuseum: true,
|
||||
}
|
||||
mapKeys = map[string]bool{
|
||||
common.ScanAllPolicy: true,
|
||||
}
|
||||
)
|
||||
|
||||
type cfgStore struct {
|
||||
name string
|
||||
}
|
||||
|
||||
// Name The name of the driver
|
||||
func (c *cfgStore) Name() string {
|
||||
return name
|
||||
}
|
||||
|
||||
// NewCfgStore creates a cfg store backed by the database driver
|
||||
func NewCfgStore() (store.Driver, error) {
|
||||
return &cfgStore{
|
||||
name: name,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Read configuration from database
|
||||
func (c *cfgStore) Read() (map[string]interface{}, error) {
|
||||
configEntries, err := dao.GetConfigEntries()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return WrapperConfig(configEntries)
|
||||
}
|
||||
|
||||
// WrapperConfig wraps the configuration entries into a map
|
||||
func WrapperConfig(configEntries []*models.ConfigEntry) (map[string]interface{}, error) {
|
||||
config := make(map[string]interface{})
|
||||
for _, entry := range configEntries {
|
||||
if numKeys[entry.Key] {
|
||||
strvalue, err := strconv.Atoi(entry.Value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
config[entry.Key] = float64(strvalue)
|
||||
} else if boolKeys[entry.Key] {
|
||||
strvalue, err := strconv.ParseBool(entry.Value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
config[entry.Key] = strvalue
|
||||
} else if mapKeys[entry.Key] {
|
||||
m := map[string]interface{}{}
|
||||
if err := json.Unmarshal([]byte(entry.Value), &m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
config[entry.Key] = m
|
||||
} else {
|
||||
config[entry.Key] = entry.Value
|
||||
}
|
||||
|
||||
}
|
||||
return config, nil
|
||||
}
|
||||
|
||||
// Write saves the configuration to the database
|
||||
func (c *cfgStore) Write(config map[string]interface{}) error {
|
||||
configEntries, err := TranslateConfig(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return dao.SaveConfigEntries(configEntries)
|
||||
}
|
||||
|
||||
// TranslateConfig translates configuration values from int, bool, float64 to string
|
||||
func TranslateConfig(config map[string]interface{}) ([]models.ConfigEntry, error) {
|
||||
var configEntries []models.ConfigEntry
|
||||
for k, v := range config {
|
||||
var entry = new(models.ConfigEntry)
|
||||
entry.Key = k
|
||||
switch v.(type) {
|
||||
case string:
|
||||
entry.Value = v.(string)
|
||||
case int:
|
||||
entry.Value = strconv.Itoa(v.(int))
|
||||
case bool:
|
||||
entry.Value = strconv.FormatBool(v.(bool))
|
||||
case float64:
|
||||
entry.Value = strconv.Itoa(int(v.(float64)))
|
||||
case map[string]interface{}:
|
||||
data, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entry.Value = string(data)
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown type %v", v)
|
||||
}
|
||||
configEntries = append(configEntries, *entry)
|
||||
}
|
||||
return configEntries, nil
|
||||
}
|
@ -1,75 +0,0 @@
|
||||
package database
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/goharbor/harbor/src/common"
|
||||
"github.com/goharbor/harbor/src/common/models"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestCfgStore_Name(t *testing.T) {
|
||||
driver, err := NewCfgStore()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create db configuration store %v", err)
|
||||
}
|
||||
assert.Equal(t, name, driver.Name())
|
||||
}
|
||||
|
||||
func TestWrapperConfig(t *testing.T) {
|
||||
cfg := []*models.ConfigEntry{
|
||||
{
|
||||
Key: common.CfgExpiration,
|
||||
Value: "500",
|
||||
},
|
||||
{
|
||||
Key: common.WithNotary,
|
||||
Value: "true",
|
||||
},
|
||||
{
|
||||
Key: common.PostGreSQLHOST,
|
||||
Value: "192.168.1.210",
|
||||
},
|
||||
}
|
||||
result, err := WrapperConfig(cfg)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to wrapper config %v", err)
|
||||
}
|
||||
withNotary, _ := result[common.WithNotary].(bool)
|
||||
assert.Equal(t, true, withNotary)
|
||||
|
||||
postgresqlhost, ok := result[common.PostGreSQLHOST].(string)
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, "192.168.1.210", postgresqlhost)
|
||||
|
||||
expiration, ok := result[common.CfgExpiration].(float64)
|
||||
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, float64(500), expiration)
|
||||
}
|
||||
|
||||
func TestTranslateConfig(t *testing.T) {
|
||||
config := map[string]interface{}{}
|
||||
config[common.PostGreSQLHOST] = "192.168.1.210"
|
||||
|
||||
entries, err := TranslateConfig(config)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to translate configuration %v", err)
|
||||
}
|
||||
assert.Equal(t, "192.168.1.210", entries[0].Value)
|
||||
config = make(map[string]interface{})
|
||||
config[common.WithNotary] = true
|
||||
entries, err = TranslateConfig(config)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to translate configuration %v", err)
|
||||
}
|
||||
assert.Equal(t, "true", entries[0].Value)
|
||||
|
||||
config = make(map[string]interface{})
|
||||
config[common.CfgExpiration] = float64(500)
|
||||
entries, err = TranslateConfig(config)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to translate configuration %v", err)
|
||||
}
|
||||
assert.Equal(t, "500", entries[0].Value)
|
||||
}
|
@ -1,26 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package store
|
||||
|
||||
// Driver defines methods that a configuration store driver must implement
|
||||
type Driver interface {
|
||||
// Name returns a human-readable name of the driver
|
||||
Name() string
|
||||
// Read reads all the configurations from store
|
||||
Read() (map[string]interface{}, error)
|
||||
// Write writes the configurations to the store; the configurations can be
|
||||
// a subset of all configurations
|
||||
Write(map[string]interface{}) error
|
||||
}
|
@ -1,97 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package encrypt
|
||||
|
||||
import (
|
||||
"github.com/goharbor/harbor/src/adminserver/systemcfg/encrypt"
|
||||
"github.com/goharbor/harbor/src/adminserver/systemcfg/store"
|
||||
"github.com/goharbor/harbor/src/common/utils/log"
|
||||
)
|
||||
|
||||
const (
|
||||
name = "encrypt"
|
||||
)
|
||||
|
||||
// cfgStore wraps a store.Driver with an encryptor
|
||||
type cfgStore struct {
|
||||
// attrs need to be encrypted and decrypted
|
||||
keys []string
|
||||
encryptor encrypt.Encryptor
|
||||
store store.Driver
|
||||
}
|
||||
|
||||
// NewCfgStore returns an instance of cfgStore
|
||||
// keys are the attrs that need to be encrypted or decrypted
|
||||
func NewCfgStore(encryptor encrypt.Encryptor,
|
||||
keys []string, store store.Driver) store.Driver {
|
||||
return &cfgStore{
|
||||
keys: keys,
|
||||
encryptor: encryptor,
|
||||
store: store,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *cfgStore) Name() string {
|
||||
return name
|
||||
}
|
||||
|
||||
func (c *cfgStore) Read() (map[string]interface{}, error) {
|
||||
m, err := c.store.Read()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, key := range c.keys {
|
||||
v, ok := m[key]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
str, ok := v.(string)
|
||||
if !ok {
|
||||
log.Warningf("the value of %s is not string, skip decrypt", key)
|
||||
continue
|
||||
}
|
||||
|
||||
text, err := c.encryptor.Decrypt(str)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m[key] = text
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (c *cfgStore) Write(m map[string]interface{}) error {
|
||||
for _, key := range c.keys {
|
||||
v, ok := m[key]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
str, ok := v.(string)
|
||||
if !ok {
|
||||
log.Warningf("%v is not string, skip encrypt", v)
|
||||
continue
|
||||
}
|
||||
|
||||
ciphertext, err := c.encryptor.Encrypt(str)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m[key] = ciphertext
|
||||
}
|
||||
return c.store.Write(m)
|
||||
}
|
@ -1,81 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package encrypt
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type fakeCfgStore struct {
|
||||
cfgs map[string]interface{}
|
||||
}
|
||||
|
||||
func (f *fakeCfgStore) Name() string {
|
||||
return "fake"
|
||||
}
|
||||
|
||||
func (f *fakeCfgStore) Read() (map[string]interface{}, error) {
|
||||
return f.cfgs, nil
|
||||
}
|
||||
|
||||
func (f *fakeCfgStore) Write(cfgs map[string]interface{}) error {
|
||||
f.cfgs = cfgs
|
||||
return nil
|
||||
}
|
||||
|
||||
type fakeEncryptor struct {
|
||||
}
|
||||
|
||||
func (f *fakeEncryptor) Encrypt(plaintext string) (string, error) {
|
||||
return "encrypted" + plaintext, nil
|
||||
}
|
||||
|
||||
func (f *fakeEncryptor) Decrypt(ciphertext string) (string, error) {
|
||||
return "decrypted" + ciphertext, nil
|
||||
}
|
||||
|
||||
func TestName(t *testing.T) {
|
||||
driver := NewCfgStore(nil, nil, nil)
|
||||
assert.Equal(t, name, driver.Name())
|
||||
}
|
||||
|
||||
func TestRead(t *testing.T) {
|
||||
keys := []string{"key"}
|
||||
driver := NewCfgStore(&fakeEncryptor{}, keys, &fakeCfgStore{
|
||||
cfgs: map[string]interface{}{"key": "value"},
|
||||
})
|
||||
|
||||
cfgs, err := driver.Read()
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, "decryptedvalue", cfgs["key"])
|
||||
}
|
||||
|
||||
func TestWrite(t *testing.T) {
|
||||
keys := []string{"key"}
|
||||
store := &fakeCfgStore{
|
||||
cfgs: map[string]interface{}{},
|
||||
}
|
||||
driver := NewCfgStore(&fakeEncryptor{}, keys, store)
|
||||
|
||||
cfgs := map[string]interface{}{
|
||||
"key": "value",
|
||||
}
|
||||
|
||||
err := driver.Write(cfgs)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, "encryptedvalue", store.cfgs["key"])
|
||||
}
|
@ -1,119 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package json
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
"github.com/goharbor/harbor/src/adminserver/systemcfg/store"
|
||||
"github.com/goharbor/harbor/src/common/utils/log"
|
||||
)
|
||||
|
||||
const (
|
||||
// the default path of configuration file
|
||||
defaultPath = "/etc/harbor/config.json"
|
||||
)
|
||||
|
||||
type cfgStore struct {
|
||||
path string // the path of cfg file
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
// NewCfgStore returns an instance of cfgStore that stores the configurations
|
||||
// in a json file. The file will be created if it does not exist.
|
||||
func NewCfgStore(path ...string) (store.Driver, error) {
|
||||
p := defaultPath
|
||||
if len(path) > 0 && len(path[0]) > 0 {
|
||||
p = path[0]
|
||||
}
|
||||
|
||||
log.Debugf("path of configuration file: %s", p)
|
||||
|
||||
if _, err := os.Stat(p); os.IsNotExist(err) {
|
||||
log.Infof("the configuration file %s does not exist, creating it...", p)
|
||||
if err = os.MkdirAll(filepath.Dir(p), 0600); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = ioutil.WriteFile(p, []byte{}, 0600); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &cfgStore{
|
||||
path: p,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Name ...
|
||||
func (c *cfgStore) Name() string {
|
||||
return "JSON"
|
||||
}
|
||||
|
||||
// Read ...
|
||||
func (c *cfgStore) Read() (map[string]interface{}, error) {
|
||||
c.RLock()
|
||||
defer c.RUnlock()
|
||||
|
||||
return read(c.path)
|
||||
}
|
||||
|
||||
func read(path string) (map[string]interface{}, error) {
|
||||
b, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// empty file
|
||||
if len(b) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
config := map[string]interface{}{}
|
||||
if err = json.Unmarshal(b, &config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return config, nil
|
||||
}
|
||||
|
||||
// Write ...
|
||||
func (c *cfgStore) Write(config map[string]interface{}) error {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
cfg, err := read(c.path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if cfg == nil {
|
||||
cfg = config
|
||||
} else {
|
||||
for k, v := range config {
|
||||
cfg[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
b, err := json.MarshalIndent(cfg, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ioutil.WriteFile(c.path, b, 0600)
|
||||
}
|
@ -1,51 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package json
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestReadWrite(t *testing.T) {
|
||||
path := "/tmp/config.json"
|
||||
store, err := NewCfgStore(path)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to create json cfg store: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := os.Remove(path); err != nil {
|
||||
t.Fatalf("failed to remove the json file %s: %v", path, err)
|
||||
}
|
||||
}()
|
||||
|
||||
if store.Name() != "JSON" {
|
||||
t.Errorf("unexpected name: %s != %s", store.Name(), "JSON")
|
||||
return
|
||||
}
|
||||
|
||||
config := map[string]interface{}{
|
||||
"key": "value",
|
||||
}
|
||||
if err := store.Write(config); err != nil {
|
||||
t.Errorf("failed to write configurations to json file: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
if _, err = store.Read(); err != nil {
|
||||
t.Errorf("failed to read configurations from json file: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
@ -1,483 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package systemcfg
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
enpt "github.com/goharbor/harbor/src/adminserver/systemcfg/encrypt"
|
||||
"github.com/goharbor/harbor/src/adminserver/systemcfg/store"
|
||||
"github.com/goharbor/harbor/src/adminserver/systemcfg/store/database"
|
||||
"github.com/goharbor/harbor/src/adminserver/systemcfg/store/encrypt"
|
||||
"github.com/goharbor/harbor/src/adminserver/systemcfg/store/json"
|
||||
"github.com/goharbor/harbor/src/common"
|
||||
comcfg "github.com/goharbor/harbor/src/common/config"
|
||||
"github.com/goharbor/harbor/src/common/dao"
|
||||
"github.com/goharbor/harbor/src/common/models"
|
||||
"github.com/goharbor/harbor/src/common/utils"
|
||||
"github.com/goharbor/harbor/src/common/utils/log"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultJSONCfgStorePath string = "/etc/adminserver/config/config.json"
|
||||
defaultKeyPath string = "/etc/adminserver/key"
|
||||
ldapScopeKey string = "ldap_scope"
|
||||
)
|
||||
|
||||
var (
|
||||
// CfgStore is a storage driver that configurations
|
||||
// can be read from and written to
|
||||
CfgStore store.Driver
|
||||
|
||||
// attrs need to be encrypted or decrypted
|
||||
attrs = []string{
|
||||
common.EmailPassword,
|
||||
common.LDAPSearchPwd,
|
||||
common.PostGreSQLPassword,
|
||||
common.AdminInitialPassword,
|
||||
common.ClairDBPassword,
|
||||
common.UAAClientSecret,
|
||||
}
|
||||
|
||||
// all configurations that need to be read from environment variables
|
||||
allEnvs = map[string]interface{}{
|
||||
common.ExtEndpoint: "EXT_ENDPOINT",
|
||||
common.AUTHMode: "AUTH_MODE",
|
||||
common.SelfRegistration: &parser{
|
||||
env: "SELF_REGISTRATION",
|
||||
parse: parseStringToBool,
|
||||
},
|
||||
common.DatabaseType: "DATABASE_TYPE",
|
||||
common.PostGreSQLHOST: "POSTGRESQL_HOST",
|
||||
common.PostGreSQLPort: &parser{
|
||||
env: "POSTGRESQL_PORT",
|
||||
parse: parseStringToInt,
|
||||
},
|
||||
common.PostGreSQLUsername: "POSTGRESQL_USERNAME",
|
||||
common.PostGreSQLPassword: "POSTGRESQL_PASSWORD",
|
||||
common.PostGreSQLDatabase: "POSTGRESQL_DATABASE",
|
||||
common.PostGreSQLSSLMode: "POSTGRESQL_SSLMODE",
|
||||
common.LDAPURL: "LDAP_URL",
|
||||
common.LDAPSearchDN: "LDAP_SEARCH_DN",
|
||||
common.LDAPSearchPwd: "LDAP_SEARCH_PWD",
|
||||
common.LDAPBaseDN: "LDAP_BASE_DN",
|
||||
common.LDAPFilter: "LDAP_FILTER",
|
||||
common.LDAPUID: "LDAP_UID",
|
||||
common.LDAPScope: &parser{
|
||||
env: "LDAP_SCOPE",
|
||||
parse: parseStringToInt,
|
||||
},
|
||||
common.LDAPTimeout: &parser{
|
||||
env: "LDAP_TIMEOUT",
|
||||
parse: parseStringToInt,
|
||||
},
|
||||
common.LDAPVerifyCert: &parser{
|
||||
env: "LDAP_VERIFY_CERT",
|
||||
parse: parseStringToBool,
|
||||
},
|
||||
common.LDAPGroupBaseDN: "LDAP_GROUP_BASEDN",
|
||||
common.LDAPGroupSearchFilter: "LDAP_GROUP_FILTER",
|
||||
common.LDAPGroupAttributeName: "LDAP_GROUP_GID",
|
||||
common.LDAPGroupSearchScope: &parser{
|
||||
env: "LDAP_GROUP_SCOPE",
|
||||
parse: parseStringToInt,
|
||||
},
|
||||
common.EmailHost: "EMAIL_HOST",
|
||||
common.EmailPort: &parser{
|
||||
env: "EMAIL_PORT",
|
||||
parse: parseStringToInt,
|
||||
},
|
||||
common.EmailUsername: "EMAIL_USR",
|
||||
common.EmailPassword: "EMAIL_PWD",
|
||||
common.EmailSSL: &parser{
|
||||
env: "EMAIL_SSL",
|
||||
parse: parseStringToBool,
|
||||
},
|
||||
common.EmailInsecure: &parser{
|
||||
env: "EMAIL_INSECURE",
|
||||
parse: parseStringToBool,
|
||||
},
|
||||
common.EmailFrom: "EMAIL_FROM",
|
||||
common.EmailIdentity: "EMAIL_IDENTITY",
|
||||
common.RegistryURL: "REGISTRY_URL",
|
||||
common.TokenExpiration: &parser{
|
||||
env: "TOKEN_EXPIRATION",
|
||||
parse: parseStringToInt,
|
||||
},
|
||||
common.CfgExpiration: &parser{
|
||||
env: "CFG_EXPIRATION",
|
||||
parse: parseStringToInt,
|
||||
},
|
||||
common.MaxJobWorkers: &parser{
|
||||
env: "MAX_JOB_WORKERS",
|
||||
parse: parseStringToInt,
|
||||
},
|
||||
common.ProjectCreationRestriction: "PROJECT_CREATION_RESTRICTION",
|
||||
common.AdminInitialPassword: "HARBOR_ADMIN_PASSWORD",
|
||||
common.AdmiralEndpoint: "ADMIRAL_URL",
|
||||
common.WithNotary: &parser{
|
||||
env: "WITH_NOTARY",
|
||||
parse: parseStringToBool,
|
||||
},
|
||||
common.WithClair: &parser{
|
||||
env: "WITH_CLAIR",
|
||||
parse: parseStringToBool,
|
||||
},
|
||||
common.ClairDBPassword: "CLAIR_DB_PASSWORD",
|
||||
common.ClairDB: "CLAIR_DB",
|
||||
common.ClairDBUsername: "CLAIR_DB_USERNAME",
|
||||
common.ClairDBHost: "CLAIR_DB_HOST",
|
||||
common.ClairDBPort: &parser{
|
||||
env: "CLAIR_DB_PORT",
|
||||
parse: parseStringToInt,
|
||||
},
|
||||
common.ClairDBSSLMode: "CLAIR_DB_SSLMODE",
|
||||
common.UAAEndpoint: "UAA_ENDPOINT",
|
||||
common.UAAClientID: "UAA_CLIENTID",
|
||||
common.UAAClientSecret: "UAA_CLIENTSECRET",
|
||||
common.UAAVerifyCert: &parser{
|
||||
env: "UAA_VERIFY_CERT",
|
||||
parse: parseStringToBool,
|
||||
},
|
||||
common.CoreURL: "CORE_URL",
|
||||
common.JobServiceURL: "JOBSERVICE_URL",
|
||||
common.TokenServiceURL: "TOKEN_SERVICE_URL",
|
||||
common.ClairURL: "CLAIR_URL",
|
||||
common.NotaryURL: "NOTARY_URL",
|
||||
common.RegistryStorageProviderName: "REGISTRY_STORAGE_PROVIDER_NAME",
|
||||
common.ReadOnly: &parser{
|
||||
env: "READ_ONLY",
|
||||
parse: parseStringToBool,
|
||||
},
|
||||
common.ReloadKey: "RELOAD_KEY",
|
||||
common.LdapGroupAdminDn: "LDAP_GROUP_ADMIN_DN",
|
||||
common.ChartRepoURL: "CHART_REPOSITORY_URL",
|
||||
common.WithChartMuseum: &parser{
|
||||
env: "WITH_CHARTMUSEUM",
|
||||
parse: parseStringToBool,
|
||||
},
|
||||
}
|
||||
|
||||
// configurations that need to be read from environment variables
|
||||
// every time the system starts up
|
||||
repeatLoadEnvs = map[string]interface{}{
|
||||
common.ExtEndpoint: "EXT_ENDPOINT",
|
||||
common.PostGreSQLHOST: "POSTGRESQL_HOST",
|
||||
common.PostGreSQLPort: &parser{
|
||||
env: "POSTGRESQL_PORT",
|
||||
parse: parseStringToInt,
|
||||
},
|
||||
common.PostGreSQLUsername: "POSTGRESQL_USERNAME",
|
||||
common.PostGreSQLPassword: "POSTGRESQL_PASSWORD",
|
||||
common.PostGreSQLDatabase: "POSTGRESQL_DATABASE",
|
||||
common.PostGreSQLSSLMode: "POSTGRESQL_SSLMODE",
|
||||
common.MaxJobWorkers: &parser{
|
||||
env: "MAX_JOB_WORKERS",
|
||||
parse: parseStringToInt,
|
||||
},
|
||||
common.CfgExpiration: &parser{
|
||||
env: "CFG_EXPIRATION",
|
||||
parse: parseStringToInt,
|
||||
},
|
||||
common.AdmiralEndpoint: "ADMIRAL_URL",
|
||||
common.WithNotary: &parser{
|
||||
env: "WITH_NOTARY",
|
||||
parse: parseStringToBool,
|
||||
},
|
||||
common.WithClair: &parser{
|
||||
env: "WITH_CLAIR",
|
||||
parse: parseStringToBool,
|
||||
},
|
||||
common.ClairDBPassword: "CLAIR_DB_PASSWORD",
|
||||
common.ClairDBHost: "CLAIR_DB_HOST",
|
||||
common.ClairDBUsername: "CLAIR_DB_USERNAME",
|
||||
common.ClairDBPort: &parser{
|
||||
env: "CLAIR_DB_PORT",
|
||||
parse: parseStringToInt,
|
||||
},
|
||||
common.ClairDBSSLMode: "CLAIR_DB_SSLMODE",
|
||||
common.UAAEndpoint: "UAA_ENDPOINT",
|
||||
common.UAAClientID: "UAA_CLIENTID",
|
||||
common.UAAClientSecret: "UAA_CLIENTSECRET",
|
||||
common.UAAVerifyCert: &parser{
|
||||
env: "UAA_VERIFY_CERT",
|
||||
parse: parseStringToBool,
|
||||
},
|
||||
common.RegistryStorageProviderName: "REGISTRY_STORAGE_PROVIDER_NAME",
|
||||
common.CoreURL: "CORE_URL",
|
||||
common.JobServiceURL: "JOBSERVICE_URL",
|
||||
common.RegistryURL: "REGISTRY_URL",
|
||||
common.TokenServiceURL: "TOKEN_SERVICE_URL",
|
||||
common.ClairURL: "CLAIR_URL",
|
||||
common.NotaryURL: "NOTARY_URL",
|
||||
common.DatabaseType: "DATABASE_TYPE",
|
||||
common.ChartRepoURL: "CHART_REPOSITORY_URL",
|
||||
common.WithChartMuseum: &parser{
|
||||
env: "WITH_CHARTMUSEUM",
|
||||
parse: parseStringToBool,
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
type parser struct {
|
||||
// the name of env
|
||||
env string
|
||||
// parse the value of env, e.g. parse string to int or
|
||||
// parse string to bool
|
||||
parse func(string) (interface{}, error)
|
||||
}
|
||||
|
||||
func parseStringToInt(str string) (interface{}, error) {
|
||||
if len(str) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
return strconv.Atoi(str)
|
||||
}
|
||||
|
||||
func parseStringToBool(str string) (interface{}, error) {
|
||||
return strings.ToLower(str) == "true" ||
|
||||
strings.ToLower(str) == "on", nil
|
||||
}
|
||||
|
||||
// Init system configurations. If env RESET is set or configurations
|
||||
// read from the storage driver are null, load all configurations from env
|
||||
func Init() (err error) {
|
||||
// init database
|
||||
envCfgs := map[string]interface{}{}
|
||||
if err := LoadFromEnv(envCfgs, true); err != nil {
|
||||
return err
|
||||
}
|
||||
db := GetDatabaseFromCfg(envCfgs)
|
||||
if err := dao.InitDatabase(db); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := dao.UpgradeSchema(db); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := dao.CheckSchemaVersion(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := initCfgStore(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Use the reload key to avoid resetting customized settings after restart
|
||||
curCfgs, err := CfgStore.Read()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
loadAll := isLoadAll(curCfgs)
|
||||
if curCfgs == nil {
|
||||
curCfgs = map[string]interface{}{}
|
||||
}
|
||||
// restart: only the repeatLoadEnvs will be loaded
|
||||
// reload_config: all envs will be reloaded except the skipped envs
|
||||
if err = LoadFromEnv(curCfgs, loadAll); err != nil {
|
||||
return err
|
||||
}
|
||||
AddMissedKey(curCfgs)
|
||||
return CfgStore.Write(curCfgs)
|
||||
}
|
||||
|
||||
func isLoadAll(cfg map[string]interface{}) bool {
|
||||
return cfg == nil || strings.EqualFold(os.Getenv("RESET"), "true") && os.Getenv("RELOAD_KEY") != cfg[common.ReloadKey]
|
||||
}
|
||||
|
||||
func initCfgStore() (err error) {
|
||||
|
||||
drivertype := os.Getenv("CFG_DRIVER")
|
||||
if len(drivertype) == 0 {
|
||||
drivertype = common.CfgDriverDB
|
||||
}
|
||||
path := os.Getenv("JSON_CFG_STORE_PATH")
|
||||
if len(path) == 0 {
|
||||
path = defaultJSONCfgStorePath
|
||||
}
|
||||
log.Infof("the path of json configuration storage: %s", path)
|
||||
|
||||
if drivertype == common.CfgDriverDB {
|
||||
CfgStore, err = database.NewCfgStore()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// migration check: if there is no data in the db, try to load it from path
|
||||
m, err := CfgStore.Read()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if m == nil || len(m) == 0 {
|
||||
if _, err := os.Stat(path); err == nil {
|
||||
jsondriver, err := json.NewCfgStore(path)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to migrate configuration from %s", path)
|
||||
return err
|
||||
}
|
||||
jsonconfig, err := jsondriver.Read()
|
||||
if err != nil {
|
||||
log.Errorf("Failed to read old configuration from %s", path)
|
||||
return err
|
||||
}
|
||||
// Update LDAP Scope for migration
|
||||
// only used when migrating harbor release before v1.3
|
||||
// after v1.3 there is always a db configuration before migrate.
|
||||
validLdapScope(jsonconfig, true)
|
||||
err = CfgStore.Write(jsonconfig)
|
||||
if err != nil {
|
||||
log.Error("Failed to update old configuration to database")
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
CfgStore, err = json.NewCfgStore(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
kp := os.Getenv("KEY_PATH")
|
||||
if len(kp) == 0 {
|
||||
kp = defaultKeyPath
|
||||
}
|
||||
log.Infof("the path of key used by key provider: %s", kp)
|
||||
|
||||
encryptor := enpt.NewAESEncryptor(
|
||||
comcfg.NewFileKeyProvider(kp), nil)
|
||||
|
||||
CfgStore = encrypt.NewCfgStore(encryptor, attrs, CfgStore)
|
||||
return nil
|
||||
}
|
||||
|
||||
// LoadFromEnv loads the configurations from allEnvs; if all is false, it just loads
|
||||
// the repeatLoadEnvs and the envs which are absent in cfgs
|
||||
func LoadFromEnv(cfgs map[string]interface{}, all bool) error {
|
||||
var envs map[string]interface{}
|
||||
|
||||
if all {
|
||||
envs = allEnvs
|
||||
} else {
|
||||
envs = make(map[string]interface{})
|
||||
for k, v := range repeatLoadEnvs {
|
||||
envs[k] = v
|
||||
}
|
||||
for k, v := range allEnvs {
|
||||
if _, exist := cfgs[k]; !exist {
|
||||
envs[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
reloadCfg := os.Getenv("RESET")
|
||||
skipPattern := os.Getenv("SKIP_RELOAD_ENV_PATTERN")
|
||||
skipPattern = strings.TrimSpace(skipPattern)
|
||||
if len(skipPattern) == 0 {
|
||||
skipPattern = "$^" // doesn't match any string by default
|
||||
}
|
||||
skipMatcher, err := regexp.Compile(skipPattern)
|
||||
if err != nil {
|
||||
log.Errorf("Regular express parse error, skipPattern:%v", skipPattern)
|
||||
skipMatcher = regexp.MustCompile("$^")
|
||||
}
|
||||
|
||||
for k, v := range envs {
|
||||
if str, ok := v.(string); ok {
|
||||
if skipMatcher.MatchString(str) && strings.EqualFold(reloadCfg, "true") {
|
||||
continue
|
||||
}
|
||||
cfgs[k] = os.Getenv(str)
|
||||
continue
|
||||
}
|
||||
|
||||
if parser, ok := v.(*parser); ok {
|
||||
if skipMatcher.MatchString(parser.env) && strings.EqualFold(reloadCfg, "true") {
|
||||
continue
|
||||
}
|
||||
i, err := parser.parse(os.Getenv(parser.env))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cfgs[k] = i
|
||||
continue
|
||||
}
|
||||
|
||||
return fmt.Errorf("%v is not string or parse type", v)
|
||||
}
|
||||
validLdapScope(cfgs, false)
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetDatabaseFromCfg creates a database object from config
|
||||
func GetDatabaseFromCfg(cfg map[string]interface{}) *models.Database {
|
||||
database := &models.Database{}
|
||||
database.Type = cfg[common.DatabaseType].(string)
|
||||
postgresql := &models.PostGreSQL{}
|
||||
postgresql.Host = utils.SafeCastString(cfg[common.PostGreSQLHOST])
|
||||
postgresql.Port = int(utils.SafeCastInt(cfg[common.PostGreSQLPort]))
|
||||
postgresql.Username = utils.SafeCastString(cfg[common.PostGreSQLUsername])
|
||||
postgresql.Password = utils.SafeCastString(cfg[common.PostGreSQLPassword])
|
||||
postgresql.Database = utils.SafeCastString(cfg[common.PostGreSQLDatabase])
|
||||
postgresql.SSLMode = utils.SafeCastString(cfg[common.PostGreSQLSSLMode])
|
||||
database.PostGreSQL = postgresql
|
||||
return database
|
||||
}
|
||||
|
||||
// Valid LDAP Scope
|
||||
func validLdapScope(cfg map[string]interface{}, isMigrate bool) {
|
||||
ldapScope, ok := cfg[ldapScopeKey].(int)
|
||||
if !ok {
|
||||
ldapScopeFloat, ok := cfg[ldapScopeKey].(float64)
|
||||
if ok {
|
||||
ldapScope = int(ldapScopeFloat)
|
||||
}
|
||||
}
|
||||
if isMigrate && ldapScope > 0 && ldapScope < 3 {
|
||||
ldapScope = ldapScope - 1
|
||||
}
|
||||
if ldapScope >= 3 {
|
||||
ldapScope = 2
|
||||
}
|
||||
if ldapScope < 0 {
|
||||
ldapScope = 0
|
||||
}
|
||||
cfg[ldapScopeKey] = ldapScope
|
||||
|
||||
}
|
||||
|
||||
// AddMissedKey ... If a configuration key is missing in the cfg map, add its default value
|
||||
func AddMissedKey(cfg map[string]interface{}) {
|
||||
|
||||
for k, v := range common.HarborStringKeysMap {
|
||||
if _, exist := cfg[k]; !exist {
|
||||
cfg[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
for k, v := range common.HarborNumKeysMap {
|
||||
if _, exist := cfg[k]; !exist {
|
||||
cfg[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
for k, v := range common.HarborBoolKeysMap {
|
||||
if _, exist := cfg[k]; !exist {
|
||||
cfg[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
}
|
@ -1,290 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package systemcfg
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/goharbor/harbor/src/common"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestParseStringToInt(t *testing.T) {
|
||||
cases := []struct {
|
||||
input string
|
||||
result int
|
||||
}{
|
||||
{"1", 1},
|
||||
{"-1", -1},
|
||||
{"0", 0},
|
||||
{"", 0},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
i, err := parseStringToInt(c.input)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, c.result, i)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseStringToBool(t *testing.T) {
|
||||
cases := []struct {
|
||||
input string
|
||||
result bool
|
||||
}{
|
||||
{"true", true},
|
||||
{"on", true},
|
||||
{"TRUE", true},
|
||||
{"ON", true},
|
||||
{"other", false},
|
||||
{"", false},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
b, _ := parseStringToBool(c.input)
|
||||
assert.Equal(t, c.result, b)
|
||||
}
|
||||
}
|
||||
|
||||
func TestInitCfgStore(t *testing.T) {
|
||||
os.Clearenv()
|
||||
path := "/tmp/config.json"
|
||||
if err := os.Setenv("CFG_DRIVER", "json"); err != nil {
|
||||
t.Fatalf("failed to set env: %v", err)
|
||||
}
|
||||
if err := os.Setenv("JSON_CFG_STORE_PATH", path); err != nil {
|
||||
t.Fatalf("failed to set env: %v", err)
|
||||
}
|
||||
defer os.RemoveAll(path)
|
||||
err := initCfgStore()
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
func TestLoadFromEnv(t *testing.T) {
|
||||
os.Clearenv()
|
||||
ldapURL := "ldap://ldap.com"
|
||||
extEndpoint := "http://harbor.com"
|
||||
if err := os.Setenv("LDAP_URL", ldapURL); err != nil {
|
||||
t.Fatalf("failed to set env: %v", err)
|
||||
}
|
||||
cfgs := map[string]interface{}{}
|
||||
err := LoadFromEnv(cfgs, true)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, ldapURL, cfgs[common.LDAPURL])
|
||||
|
||||
os.Clearenv()
|
||||
if err := os.Setenv("LDAP_URL", ldapURL); err != nil {
|
||||
t.Fatalf("failed to set env: %v", err)
|
||||
}
|
||||
if err := os.Setenv("EXT_ENDPOINT", extEndpoint); err != nil {
|
||||
t.Fatalf("failed to set env: %v", err)
|
||||
}
|
||||
|
||||
if err := os.Setenv("LDAP_VERIFY_CERT", "false"); err != nil {
|
||||
t.Fatalf("failed to set env: %v", err)
|
||||
}
|
||||
|
||||
cfgs = map[string]interface{}{}
|
||||
err = LoadFromEnv(cfgs, false)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, extEndpoint, cfgs[common.ExtEndpoint])
|
||||
assert.Equal(t, ldapURL, cfgs[common.LDAPURL])
|
||||
assert.Equal(t, false, cfgs[common.LDAPVerifyCert])
|
||||
|
||||
os.Clearenv()
|
||||
if err := os.Setenv("LDAP_URL", ldapURL); err != nil {
|
||||
t.Fatalf("failed to set env: %v", err)
|
||||
}
|
||||
if err := os.Setenv("EXT_ENDPOINT", extEndpoint); err != nil {
|
||||
t.Fatalf("failed to set env: %v", err)
|
||||
}
|
||||
|
||||
if err := os.Setenv("LDAP_VERIFY_CERT", "true"); err != nil {
|
||||
t.Fatalf("failed to set env: %v", err)
|
||||
}
|
||||
|
||||
cfgs = map[string]interface{}{
|
||||
common.LDAPURL: "ldap_url",
|
||||
}
|
||||
err = LoadFromEnv(cfgs, false)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, extEndpoint, cfgs[common.ExtEndpoint])
|
||||
assert.Equal(t, "ldap_url", cfgs[common.LDAPURL])
|
||||
assert.Equal(t, true, cfgs[common.LDAPVerifyCert])
|
||||
|
||||
}
|
||||
|
||||
func TestIsLoadAll(t *testing.T) {
|
||||
os.Clearenv()
|
||||
if err := os.Setenv("RELOAD_KEY", "123456"); err != nil {
|
||||
t.Fatalf("failed to set env: %v", err)
|
||||
}
|
||||
if err := os.Setenv("RESET", "True"); err != nil {
|
||||
t.Fatalf("failed to set env: %v", err)
|
||||
}
|
||||
cfg1 := map[string]interface{}{common.ReloadKey: "123456"}
|
||||
cfg2 := map[string]interface{}{common.ReloadKey: "654321"}
|
||||
assert.False(t, isLoadAll(cfg1))
|
||||
assert.True(t, isLoadAll(cfg2))
|
||||
}
|
||||
|
||||
func TestLoadFromEnvWithReloadConfigInvalidSkipPattern(t *testing.T) {
|
||||
os.Clearenv()
|
||||
ldapURL := "ldap://ldap.com"
|
||||
extEndpoint := "http://harbor.com"
|
||||
cfgsReload := map[string]interface{}{
|
||||
common.LDAPURL: "ldap_url",
|
||||
}
|
||||
if err := os.Setenv("LDAP_URL", ldapURL); err != nil {
|
||||
t.Fatalf("failed to set env: %v", err)
|
||||
}
|
||||
if err := os.Setenv("EXT_ENDPOINT", extEndpoint); err != nil {
|
||||
t.Fatalf("failed to set env: %v", err)
|
||||
}
|
||||
|
||||
if err := os.Setenv("LDAP_VERIFY_CERT", "false"); err != nil {
|
||||
t.Fatalf("failed to set env: %v", err)
|
||||
}
|
||||
|
||||
if err := os.Setenv("SKIP_RELOAD_ENV_PATTERN", "a(b"); err != nil {
|
||||
t.Fatalf("failed to set env: %v", err)
|
||||
}
|
||||
	err := LoadFromEnv(cfgsReload, true)
	if err != nil {
		t.Fatalf("failed to load From env: %v", err)
	}
	assert.Equal(t, ldapURL, cfgsReload[common.LDAPURL])

	os.Clearenv()

}

func TestLoadFromEnvWithReloadConfigSkipPattern(t *testing.T) {
	os.Clearenv()
	ldapURL := "ldap://ldap.com"
	extEndpoint := "http://harbor.com"
	cfgsReload := map[string]interface{}{
		common.LDAPURL: "ldap_url",
	}
	if err := os.Setenv("LDAP_URL", ldapURL); err != nil {
		t.Fatalf("failed to set env: %v", err)
	}
	if err := os.Setenv("EXT_ENDPOINT", extEndpoint); err != nil {
		t.Fatalf("failed to set env: %v", err)
	}

	if err := os.Setenv("LDAP_VERIFY_CERT", "false"); err != nil {
		t.Fatalf("failed to set env: %v", err)
	}
	if err := os.Setenv("SKIP_RELOAD_ENV_PATTERN", "^LDAP.*"); err != nil {
		t.Fatalf("failed to set env: %v", err)
	}
	if err := os.Setenv("RESET", "true"); err != nil {
		t.Fatalf("failed to set env: %v", err)
	}
	err := LoadFromEnv(cfgsReload, false)
	if err != nil {
		t.Fatalf("failed to load From env: %v", err)
	}
	assert.Equal(t, "ldap_url", cfgsReload[common.LDAPURL]) // env value ignored

	os.Clearenv()

}
func TestGetDatabaseFromCfg(t *testing.T) {
	cfg := map[string]interface{}{
		common.DatabaseType:       "postgresql",
		common.PostGreSQLDatabase: "registry",
		common.PostGreSQLHOST:     "127.0.0.1",
		common.PostGreSQLPort:     5432,
		common.PostGreSQLPassword: "root123",
		common.PostGreSQLUsername: "postgres",
	}

	database := GetDatabaseFromCfg(cfg)

	assert.Equal(t, "postgresql", database.Type)
}

func TestValidLdapScope(t *testing.T) {
	var dbValue float64
	dbValue = 2
	ldapScopeKey := "ldap_scope"
	testCfgs := []struct {
		config          map[string]interface{}
		migrate         bool
		ldapScopeResult int
	}{
		{map[string]interface{}{
			ldapScopeKey: 1,
		}, true, 0},
		{map[string]interface{}{
			ldapScopeKey: 2,
		}, true, 1},
		{map[string]interface{}{
			ldapScopeKey: 3,
		}, true, 2},
		{map[string]interface{}{
			ldapScopeKey: -1,
		}, true, 0},
		{map[string]interface{}{
			ldapScopeKey: 100,
		}, false, 2},
		{map[string]interface{}{
			ldapScopeKey: -100,
		}, false, 0},
		{map[string]interface{}{
			ldapScopeKey: dbValue,
		}, false, 2},
	}

	for i, item := range testCfgs {
		validLdapScope(item.config, item.migrate)
		if item.config[ldapScopeKey].(int) != item.ldapScopeResult {
			t.Fatalf("Failed to update ldapScope expected %v, actual %v at index %v", item.ldapScopeResult, item.config[ldapScopeKey], i)
		}

	}

}
func Test_AddMissingKey(t *testing.T) {

	cfg := map[string]interface{}{
		common.LDAPURL:        "sampleurl",
		common.EmailPort:      555,
		common.LDAPVerifyCert: true,
	}

	type args struct {
		cfg map[string]interface{}
	}
	tests := []struct {
		name string
		args args
	}{
		{"Add default value", args{cfg}},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			AddMissedKey(tt.args.cfg)
		})
	}

	if _, ok := cfg[common.LDAPBaseDN]; !ok {
		t.Errorf("Can not found default value for %v", common.LDAPBaseDN)
	}

}
6 src/cmd/migrate-patch/README.md Normal file
@ -0,0 +1,6 @@
# Migrate Patch
This is a simple program to fix the breakage that was introduced by the `migrate` tool in Notary.
## Usage
```sh
patch -database <db_url>
```
71 src/cmd/migrate-patch/main.go Normal file
@ -0,0 +1,71 @@
package main

import (
	"database/sql"
	"flag"
	_ "github.com/lib/pq"
	"log"
	"strings"
	"time"
)

var dbURL string

const pgSQLAlterStmt string = `ALTER TABLE schema_migrations ADD COLUMN "dirty" boolean NOT NULL DEFAULT false`
const pgSQLCheckColStmt string = `SELECT T1.C1, T2.C2 FROM
	(SELECT COUNT(*) AS C1 FROM information_schema.tables WHERE table_name='schema_migrations') T1,
	(SELECT COUNT(*) AS C2 FROM information_schema.columns WHERE table_name='schema_migrations' and column_name='dirty') T2`
const pgSQLDelRows string = `DELETE FROM schema_migrations t WHERE t.version < ( SELECT MAX(version) FROM schema_migrations )`

func init() {
	urlUsage := `The URL to the target database (driver://url). Currently it only supports postgres`
	flag.StringVar(&dbURL, "database", "", urlUsage)
}

func main() {
	flag.Parse()
	log.Printf("Updating database.")
	if !strings.HasPrefix(dbURL, "postgres://") {
		log.Fatalf("Invalid URL: '%s'\n", dbURL)
	}
	db, err := sql.Open("postgres", dbURL)
	if err != nil {
		log.Fatalf("Failed to connect to Database, error: %v\n", err)
	}
	defer db.Close()
	c := make(chan int, 1)
	go func() {
		err := db.Ping()
		for ; err != nil; err = db.Ping() {
			log.Printf("Failed to Ping DB, sleep for 1 second.\n")
			time.Sleep(1 * time.Second)
		}
		c <- 1
	}()
	select {
	case <-c:
	case <-time.After(30 * time.Second):
		log.Fatal("Failed to connect DB after 30 seconds, time out. \n")

	}
	row := db.QueryRow(pgSQLCheckColStmt)
	var tblCount, colCount int
	if err := row.Scan(&tblCount, &colCount); err != nil {
		log.Fatalf("Failed to check schema_migrations table, error: %v \n", err)
	}
	if tblCount == 0 {
		log.Printf("schema_migrations table does not exist, skip.\n")
		return
	}
	if colCount > 0 {
		log.Printf("schema_migrations table does not require update, skip.\n")
		return
	}
	if _, err := db.Exec(pgSQLDelRows); err != nil {
		log.Fatalf("Failed to clean up table, error: %v", err)
	}
	if _, err := db.Exec(pgSQLAlterStmt); err != nil {
		log.Fatalf("Failed to update database, error: %v \n", err)
	}
	log.Printf("Done updating database. \n")
}
@ -1,112 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package config provide methods to get the configurations reqruied by code in src/common
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/astaxie/beego/cache"
|
||||
"github.com/goharbor/harbor/src/adminserver/client"
|
||||
"github.com/goharbor/harbor/src/common"
|
||||
)
|
||||
|
||||
// Manager manages configurations
|
||||
type Manager struct {
|
||||
client client.Client
|
||||
Cache bool
|
||||
cache cache.Cache
|
||||
key string
|
||||
}
|
||||
|
||||
// NewManager returns an instance of Manager
|
||||
func NewManager(client client.Client, enableCache bool) *Manager {
|
||||
m := &Manager{
|
||||
client: client,
|
||||
}
|
||||
|
||||
if enableCache {
|
||||
m.Cache = true
|
||||
m.cache = cache.NewMemoryCache()
|
||||
m.key = "cfg"
|
||||
}
|
||||
|
||||
return m
|
||||
}
|
||||
|
||||
// Load configurations, if cache is enabled, cache the configurations
|
||||
func (m *Manager) Load() (map[string]interface{}, error) {
|
||||
c, err := m.client.GetCfgs()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if m.Cache {
|
||||
expi, err := getCfgExpiration(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// copy the configuration map so that later modification to the
|
||||
// map does not effect the cached value
|
||||
cachedCfgs := map[string]interface{}{}
|
||||
for k, v := range c {
|
||||
cachedCfgs[k] = v
|
||||
}
|
||||
|
||||
if err = m.cache.Put(m.key, cachedCfgs,
|
||||
time.Duration(expi)*time.Second); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Reset configurations
|
||||
func (m *Manager) Reset() error {
|
||||
return m.client.ResetCfgs()
|
||||
}
|
||||
|
||||
func getCfgExpiration(m map[string]interface{}) (int, error) {
|
||||
if m == nil {
|
||||
return 0, fmt.Errorf("can not get cfg expiration as configurations are null")
|
||||
}
|
||||
|
||||
expi, ok := m[common.CfgExpiration]
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("cfg expiration is not set")
|
||||
}
|
||||
|
||||
return int(expi.(float64)), nil
|
||||
}
|
||||
|
||||
// Get : if cache is enabled, read configurations from cache,
|
||||
// if cache is null or cache is disabled it loads configurations directly
|
||||
func (m *Manager) Get() (map[string]interface{}, error) {
|
||||
if m.Cache {
|
||||
c := m.cache.Get(m.key)
|
||||
if c != nil {
|
||||
return c.(map[string]interface{}), nil
|
||||
}
|
||||
}
|
||||
return m.Load()
|
||||
}
|
||||
|
||||
// Upload configurations
|
||||
func (m *Manager) Upload(cfgs map[string]interface{}) error {
|
||||
return m.client.UpdateCfgs(cfgs)
|
||||
}
|
@ -1,17 +0,0 @@
|
||||
// Copyright Project Harbor Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
package config
|
||||
|
||||
// the functions in common/config/config.go have been tested
|
||||
// by cases in UI and Jobservice
|
@ -125,8 +125,10 @@ func (c *CfgManager) GetAll() map[string]interface{} {
|
||||
metaDataList := metadata.Instance().GetAll()
|
||||
for _, item := range metaDataList {
|
||||
cfgValue, err := c.store.GetAnyType(item.Name)
|
||||
if err != metadata.ErrValueNotSet && err != nil {
|
||||
log.Errorf("Failed to get value of key %v, error %v", item.Name, err)
|
||||
if err != nil {
|
||||
if err != metadata.ErrValueNotSet {
|
||||
log.Errorf("Failed to get value of key %v, error %v", item.Name, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
resultMap[item.Name] = cfgValue
|
||||
@ -145,8 +147,10 @@ func (c *CfgManager) GetUserCfgs() map[string]interface{} {
|
||||
for _, item := range metaDataList {
|
||||
if item.Scope == metadata.UserScope {
|
||||
cfgValue, err := c.store.GetAnyType(item.Name)
|
||||
if err != metadata.ErrValueNotSet && err != nil {
|
||||
log.Errorf("Failed to get value of key %v, error %v", item.Name, err)
|
||||
if err != nil {
|
||||
if err != metadata.ErrValueNotSet {
|
||||
log.Errorf("Failed to get value of key %v, error %v", item.Name, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
resultMap[item.Name] = cfgValue
|
||||
|
@ -30,7 +30,7 @@ type Item struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
// It can be &IntType{}, &StringType{}, &BoolType{}, &PasswordType{}, &MapType{} etc, any type interface implementation
|
||||
ItemType Type
|
||||
// Whether this setting can be modified after configuration
|
||||
// TODO: Clarify the usage of this attribute
|
||||
Editable bool `json:"editable,omitempty"`
|
||||
}
|
||||
|
||||
@ -44,6 +44,7 @@ const (
|
||||
LdapGroupGroup = "ldapgroup"
|
||||
EmailGroup = "email"
|
||||
UAAGroup = "uaa"
|
||||
HTTPAuthGroup = "http_auth"
|
||||
DatabaseGroup = "database"
|
||||
// Put all config items do not belong a existing group into basic
|
||||
BasicGroup = "basic"
|
||||
@ -51,13 +52,13 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
// ConfigList - All configure items used in harbor
|
||||
// Steps to onboard a new setting
|
||||
// 1. Add configure item in metadatalist.go
|
||||
// 2. Get/Set config settings by CfgManager
|
||||
// 3. CfgManager.Load()/CfgManager.Save() to load/save from configure storage.
|
||||
ConfigList = []Item{
|
||||
// TODO: All these Name should be reference to const, see #7040
|
||||
{Name: "admin_initial_password", Scope: SystemScope, Group: BasicGroup, EnvKey: "HARBOR_ADMIN_PASSWORD", DefaultValue: "", ItemType: &PasswordType{}, Editable: true},
|
||||
{Name: "admiral_url", Scope: SystemScope, Group: BasicGroup, EnvKey: "ADMIRAL_URL", DefaultValue: "", ItemType: &StringType{}, Editable: false},
|
||||
{Name: "auth_mode", Scope: UserScope, Group: BasicGroup, EnvKey: "AUTH_MODE", DefaultValue: "db_auth", ItemType: &AuthModeType{}, Editable: false},
|
||||
@ -101,6 +102,7 @@ var (
|
||||
{Name: "ldap_uid", Scope: UserScope, Group: LdapBasicGroup, EnvKey: "LDAP_UID", DefaultValue: "cn", ItemType: &NonEmptyStringType{}, Editable: false},
|
||||
{Name: "ldap_url", Scope: UserScope, Group: LdapBasicGroup, EnvKey: "LDAP_URL", DefaultValue: "", ItemType: &NonEmptyStringType{}, Editable: false},
|
||||
{Name: "ldap_verify_cert", Scope: UserScope, Group: LdapBasicGroup, EnvKey: "LDAP_VERIFY_CERT", DefaultValue: "true", ItemType: &BoolType{}, Editable: false},
|
||||
{Name: common.LDAPGroupMembershipAttribute, Scope: UserScope, Group: LdapBasicGroup, EnvKey: "LDAP_GROUP_MEMBERSHIP_ATTRIBUTE", DefaultValue: "memberof", ItemType: &StringType{}, Editable: true},
|
||||
|
||||
{Name: "max_job_workers", Scope: SystemScope, Group: BasicGroup, EnvKey: "MAX_JOB_WORKERS", DefaultValue: "10", ItemType: &IntType{}, Editable: false},
|
||||
{Name: "notary_url", Scope: SystemScope, Group: BasicGroup, EnvKey: "NOTARY_URL", DefaultValue: "http://notary-server:4443", ItemType: &StringType{}, Editable: false},
|
||||
@ -128,8 +130,14 @@ var (
|
||||
{Name: "uaa_endpoint", Scope: UserScope, Group: UAAGroup, EnvKey: "UAA_ENDPOINT", DefaultValue: "", ItemType: &StringType{}, Editable: false},
|
||||
{Name: "uaa_verify_cert", Scope: UserScope, Group: UAAGroup, EnvKey: "UAA_VERIFY_CERT", DefaultValue: "false", ItemType: &BoolType{}, Editable: false},
|
||||
|
||||
{Name: common.HTTPAuthProxyEndpoint, Scope: UserScope, Group: HTTPAuthGroup, EnvKey: "HTTP_AUTHPROXY_ENDPOINT", DefaultValue: "", ItemType: &StringType{}, Editable: false},
|
||||
{Name: common.HTTPAuthProxySkipCertVerify, Scope: UserScope, Group: HTTPAuthGroup, EnvKey: "HTTP_AUTHPROXY_SKIP_CERT_VERIFY", DefaultValue: "false", ItemType: &BoolType{}, Editable: false},
|
||||
{Name: common.HTTPAuthProxyAlwaysOnboard, Scope: UserScope, Group: HTTPAuthGroup, EnvKey: "HTTP_AUTHPROXY_ALWAYS_ONBOARD", DefaultValue: "false", ItemType: &BoolType{}, Editable: false},
|
||||
|
||||
{Name: "with_chartmuseum", Scope: SystemScope, Group: BasicGroup, EnvKey: "WITH_CHARTMUSEUM", DefaultValue: "false", ItemType: &BoolType{}, Editable: true},
|
||||
{Name: "with_clair", Scope: SystemScope, Group: BasicGroup, EnvKey: "WITH_CLAIR", DefaultValue: "false", ItemType: &BoolType{}, Editable: true},
|
||||
{Name: "with_notary", Scope: SystemScope, Group: BasicGroup, EnvKey: "WITH_NOTARY", DefaultValue: "false", ItemType: &BoolType{}, Editable: true},
|
||||
// the unit of expiration is minute, 43200 minutes = 30 days
|
||||
{Name: "robot_token_duration", Scope: UserScope, Group: BasicGroup, EnvKey: "ROBOT_TOKEN_DURATION", DefaultValue: "43200", ItemType: &IntType{}, Editable: true},
|
||||
}
|
||||
)
|
||||
|
@ -61,11 +61,11 @@ type AuthModeType struct {
|
||||
}
|
||||
|
||||
func (t *AuthModeType) validate(str string) error {
|
||||
if str == common.LDAPAuth || str == common.DBAuth || str == common.UAAAuth {
|
||||
if str == common.LDAPAuth || str == common.DBAuth || str == common.UAAAuth || str == common.HTTPAuth {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("invalid %s, shoud be one of %s, %s, %s",
|
||||
common.AUTHMode, common.DBAuth, common.LDAPAuth, common.UAAAuth)
|
||||
return fmt.Errorf("invalid %s, should be one of %s, %s, %s, %s",
|
||||
common.AUTHMode, common.DBAuth, common.LDAPAuth, common.UAAAuth, common.HTTPAuth)
|
||||
}
|
||||
|
||||
// ProjectCreationRestrictionType ...
|
||||
|
@ -41,64 +41,66 @@ const (
|
||||
ResourceTypeImage = "i"
|
||||
ResourceTypeChart = "c"
|
||||
|
||||
ExtEndpoint = "ext_endpoint"
|
||||
AUTHMode = "auth_mode"
|
||||
DatabaseType = "database_type"
|
||||
PostGreSQLHOST = "postgresql_host"
|
||||
PostGreSQLPort = "postgresql_port"
|
||||
PostGreSQLUsername = "postgresql_username"
|
||||
PostGreSQLPassword = "postgresql_password"
|
||||
PostGreSQLDatabase = "postgresql_database"
|
||||
PostGreSQLSSLMode = "postgresql_sslmode"
|
||||
SelfRegistration = "self_registration"
|
||||
CoreURL = "core_url"
|
||||
JobServiceURL = "jobservice_url"
|
||||
LDAPURL = "ldap_url"
|
||||
LDAPSearchDN = "ldap_search_dn"
|
||||
LDAPSearchPwd = "ldap_search_password"
|
||||
LDAPBaseDN = "ldap_base_dn"
|
||||
LDAPUID = "ldap_uid"
|
||||
LDAPFilter = "ldap_filter"
|
||||
LDAPScope = "ldap_scope"
|
||||
LDAPTimeout = "ldap_timeout"
|
||||
LDAPVerifyCert = "ldap_verify_cert"
|
||||
LDAPGroupBaseDN = "ldap_group_base_dn"
|
||||
LDAPGroupSearchFilter = "ldap_group_search_filter"
|
||||
LDAPGroupAttributeName = "ldap_group_attribute_name"
|
||||
LDAPGroupSearchScope = "ldap_group_search_scope"
|
||||
TokenServiceURL = "token_service_url"
|
||||
RegistryURL = "registry_url"
|
||||
EmailHost = "email_host"
|
||||
EmailPort = "email_port"
|
||||
EmailUsername = "email_username"
|
||||
EmailPassword = "email_password"
|
||||
EmailFrom = "email_from"
|
||||
EmailSSL = "email_ssl"
|
||||
EmailIdentity = "email_identity"
|
||||
EmailInsecure = "email_insecure"
|
||||
ProjectCreationRestriction = "project_creation_restriction"
|
||||
MaxJobWorkers = "max_job_workers"
|
||||
TokenExpiration = "token_expiration"
|
||||
CfgExpiration = "cfg_expiration"
|
||||
JobLogDir = "job_log_dir"
|
||||
AdminInitialPassword = "admin_initial_password"
|
||||
AdmiralEndpoint = "admiral_url"
|
||||
WithNotary = "with_notary"
|
||||
WithClair = "with_clair"
|
||||
ScanAllPolicy = "scan_all_policy"
|
||||
ClairDBPassword = "clair_db_password"
|
||||
ClairDBHost = "clair_db_host"
|
||||
ClairDBPort = "clair_db_port"
|
||||
ClairDB = "clair_db"
|
||||
ClairDBUsername = "clair_db_username"
|
||||
ClairDBSSLMode = "clair_db_sslmode"
|
||||
UAAEndpoint = "uaa_endpoint"
|
||||
UAAClientID = "uaa_client_id"
|
||||
UAAClientSecret = "uaa_client_secret"
|
||||
UAAVerifyCert = "uaa_verify_cert"
|
||||
ExtEndpoint = "ext_endpoint"
|
||||
AUTHMode = "auth_mode"
|
||||
DatabaseType = "database_type"
|
||||
PostGreSQLHOST = "postgresql_host"
|
||||
PostGreSQLPort = "postgresql_port"
|
||||
PostGreSQLUsername = "postgresql_username"
|
||||
PostGreSQLPassword = "postgresql_password"
|
||||
PostGreSQLDatabase = "postgresql_database"
|
||||
PostGreSQLSSLMode = "postgresql_sslmode"
|
||||
SelfRegistration = "self_registration"
|
||||
CoreURL = "core_url"
|
||||
JobServiceURL = "jobservice_url"
|
||||
LDAPURL = "ldap_url"
|
||||
LDAPSearchDN = "ldap_search_dn"
|
||||
LDAPSearchPwd = "ldap_search_password"
|
||||
LDAPBaseDN = "ldap_base_dn"
|
||||
LDAPUID = "ldap_uid"
|
||||
LDAPFilter = "ldap_filter"
|
||||
LDAPScope = "ldap_scope"
|
||||
LDAPTimeout = "ldap_timeout"
|
||||
LDAPVerifyCert = "ldap_verify_cert"
|
||||
LDAPGroupBaseDN = "ldap_group_base_dn"
|
||||
LDAPGroupSearchFilter = "ldap_group_search_filter"
|
||||
LDAPGroupAttributeName = "ldap_group_attribute_name"
|
||||
LDAPGroupSearchScope = "ldap_group_search_scope"
|
||||
TokenServiceURL = "token_service_url"
|
||||
RegistryURL = "registry_url"
|
||||
EmailHost = "email_host"
|
||||
EmailPort = "email_port"
|
||||
EmailUsername = "email_username"
|
||||
EmailPassword = "email_password"
|
||||
EmailFrom = "email_from"
|
||||
EmailSSL = "email_ssl"
|
||||
EmailIdentity = "email_identity"
|
||||
EmailInsecure = "email_insecure"
|
||||
ProjectCreationRestriction = "project_creation_restriction"
|
||||
MaxJobWorkers = "max_job_workers"
|
||||
TokenExpiration = "token_expiration"
|
||||
CfgExpiration = "cfg_expiration"
|
||||
AdminInitialPassword = "admin_initial_password"
|
||||
AdmiralEndpoint = "admiral_url"
|
||||
WithNotary = "with_notary"
|
||||
WithClair = "with_clair"
|
||||
ScanAllPolicy = "scan_all_policy"
|
||||
ClairDBPassword = "clair_db_password"
|
||||
ClairDBHost = "clair_db_host"
|
||||
ClairDBPort = "clair_db_port"
|
||||
ClairDB = "clair_db"
|
||||
ClairDBUsername = "clair_db_username"
|
||||
ClairDBSSLMode = "clair_db_sslmode"
|
||||
UAAEndpoint = "uaa_endpoint"
|
||||
UAAClientID = "uaa_client_id"
|
||||
UAAClientSecret = "uaa_client_secret"
|
||||
UAAVerifyCert = "uaa_verify_cert"
|
||||
HTTPAuthProxyEndpoint = "http_authproxy_endpoint"
|
||||
HTTPAuthProxySkipCertVerify = "http_authproxy_skip_cert_verify"
|
||||
HTTPAuthProxyAlwaysOnboard = "http_authproxy_always_onboard"
|
||||
|
||||
DefaultClairEndpoint = "http://clair:6060"
|
||||
CfgDriverDB = "db"
|
||||
CfgDriverJSON = "json"
|
||||
NewHarborAdminName = "admin@harbor.local"
|
||||
RegistryStorageProviderName = "registry_storage_provider_name"
|
||||
UserMember = "u"
|
||||
@ -106,12 +108,11 @@ const (
|
||||
ReadOnly = "read_only"
|
||||
ClairURL = "clair_url"
|
||||
NotaryURL = "notary_url"
|
||||
DefaultAdminserverEndpoint = "http://adminserver:8080"
|
||||
DefaultCoreEndpoint = "http://core:8080"
|
||||
DefaultNotaryEndpoint = "http://notary-server:4443"
|
||||
LdapGroupType = 1
|
||||
ReloadKey = "reload_key"
|
||||
LdapGroupAdminDn = "ldap_group_admin_dn"
|
||||
LDAPGroupMembershipAttribute = "ldap_group_membership_attribute"
|
||||
DefaultRegistryControllerEndpoint = "http://registryctl:8080"
|
||||
WithChartMuseum = "with_chartmuseum"
|
||||
ChartRepoURL = "chart_repository_url"
|
||||
@ -120,51 +121,7 @@ const (
|
||||
DefaultRegistryCtlURL = "http://registryctl:8080"
|
||||
DefaultClairHealthCheckServerURL = "http://clair:6061"
|
||||
// Use this prefix to distinguish harbor user, the prefix contains a special character($), so it cannot be registered as a harbor user.
|
||||
RobotPrefix = "robot$"
|
||||
CoreConfigPath = "/api/internal/configurations"
|
||||
)
|
||||
|
||||
// TODO remove with adminserver
|
||||
// Shared variable, not allowed to modify
|
||||
var (
|
||||
|
||||
// value is default value
|
||||
HarborStringKeysMap = map[string]string{
|
||||
AUTHMode: "db_auth",
|
||||
LDAPURL: "",
|
||||
LDAPSearchDN: "",
|
||||
LDAPSearchPwd: "",
|
||||
LDAPBaseDN: "",
|
||||
LDAPUID: "",
|
||||
LDAPFilter: "",
|
||||
LDAPGroupAttributeName: "",
|
||||
LDAPGroupBaseDN: "",
|
||||
LdapGroupAdminDn: "",
|
||||
LDAPGroupSearchFilter: "",
|
||||
EmailHost: "smtp.mydomain.com",
|
||||
EmailUsername: "sample_admin@mydomain.com",
|
||||
EmailPassword: "abc",
|
||||
EmailFrom: "admin <sample_admin@mydomain.com>",
|
||||
EmailIdentity: "",
|
||||
ProjectCreationRestriction: ProCrtRestrEveryone,
|
||||
UAAClientID: "",
|
||||
UAAEndpoint: "",
|
||||
}
|
||||
|
||||
HarborNumKeysMap = map[string]int{
|
||||
EmailPort: 25,
|
||||
LDAPScope: 2,
|
||||
LDAPTimeout: 5,
|
||||
LDAPGroupSearchScope: 2,
|
||||
TokenExpiration: 30,
|
||||
}
|
||||
|
||||
HarborBoolKeysMap = map[string]bool{
|
||||
EmailSSL: false,
|
||||
EmailInsecure: false,
|
||||
SelfRegistration: true,
|
||||
LDAPVerifyCert: true,
|
||||
UAAVerifyCert: true,
|
||||
ReadOnly: false,
|
||||
}
|
||||
RobotPrefix = "robot$"
|
||||
CoreConfigPath = "/api/internal/configurations"
|
||||
RobotTokenDuration = "robot_token_duration"
|
||||
)
|
||||
|
@ -952,12 +952,8 @@ func TestDeleteRepPolicy(t *testing.T) {
|
||||
}
|
||||
t.Logf("delete rep policy, id: %d", policyID)
|
||||
p, err := GetRepPolicy(policyID)
|
||||
if err != nil && err != orm.ErrNoRows {
|
||||
t.Errorf("Error occurred in GetRepPolicy:%v", err)
|
||||
}
|
||||
if p != nil && !p.Deleted {
|
||||
t.Errorf("Able to find rep policy after deletion, id: %d", policyID)
|
||||
}
|
||||
require.Nil(t, err)
|
||||
assert.Nil(t, p)
|
||||
}
|
||||
|
||||
func TestGetOrmer(t *testing.T) {
|
||||
|
@ -195,13 +195,9 @@ func UpdateRepPolicy(policy *models.RepPolicy) error {

// DeleteRepPolicy ...
func DeleteRepPolicy(id int64) error {
	o := GetOrmer()
	policy := &models.RepPolicy{
		ID:         id,
		Deleted:    true,
		UpdateTime: time.Now(),
	}
	_, err := o.Update(policy, "Deleted")
	_, err := GetOrmer().Delete(&models.RepPolicy{
		ID: id,
	})
	return err
}

@ -293,6 +289,12 @@ func DeleteRepJob(id int64) error {
	return err
}

// DeleteRepJobs deletes replication jobs by policy ID
func DeleteRepJobs(policyID int64) error {
	_, err := GetOrmer().QueryTable(&models.RepJob{}).Filter("PolicyID", policyID).Delete()
	return err
}

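A minimal sketch of how a caller could pair the two DAO functions above when tearing down a policy; the wrapper name and the ordering are illustrative assumptions, not part of this change:

```go
// removeRepPolicy is a hypothetical helper: it deletes the jobs that
// reference a replication policy before hard-deleting the policy row,
// using only DeleteRepJobs and DeleteRepPolicy as defined above.
func removeRepPolicy(policyID int64) error {
	if err := DeleteRepJobs(policyID); err != nil {
		return err
	}
	return DeleteRepPolicy(policyID)
}
```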
// UpdateRepJobStatus ...
|
||||
func UpdateRepJobStatus(id int64, status string) error {
|
||||
o := GetOrmer()
|
||||
|
56 src/common/dao/replication_job_test.go Normal file
@ -0,0 +1,56 @@
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package dao

import (
	"testing"

	"github.com/goharbor/harbor/src/common/models"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestDeleteRepJobs(t *testing.T) {
	var policyID int64 = 999
	_, err := AddRepJob(models.RepJob{
		PolicyID:   policyID,
		Repository: "library/hello-world",
		Operation:  "delete",
		Status:     "success",
	})
	require.Nil(t, err)
	_, err = AddRepJob(models.RepJob{
		PolicyID:   policyID,
		Repository: "library/hello-world",
		Operation:  "delete",
		Status:     "success",
	})
	require.Nil(t, err)

	jobs, err := GetRepJobs(&models.RepJobQuery{
		PolicyID: policyID,
	})
	require.Nil(t, err)
	require.Equal(t, 2, len(jobs))

	err = DeleteRepJobs(policyID)
	require.Nil(t, err)

	jobs, err = GetRepJobs(&models.RepJobQuery{
		PolicyID: policyID,
	})
	require.Nil(t, err)
	assert.Equal(t, 0, len(jobs))
}
@ -133,3 +133,12 @@ func ArrayEqual(arrayA, arrayB []int) bool {
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// ClearHTTPAuthProxyUsers removes the records from the harbor_user table to delete all users imported via
// HTTP Auth Proxy
|
||||
func ClearHTTPAuthProxyUsers() error {
|
||||
o := GetOrmer()
|
||||
sql := "DELETE FROM harbor_user WHERE comment='By Authproxy'"
|
||||
_, err := o.Raw(sql).Exec()
|
||||
return err
|
||||
}
|
||||
|
@ -65,36 +65,13 @@ type Email struct {
|
||||
Insecure bool `json:"insecure"`
|
||||
}
|
||||
|
||||
/*
|
||||
// Registry ...
|
||||
type Registry struct {
|
||||
URL string `json:"url"`
|
||||
// HTTPAuthProxy wraps the settings for HTTP auth proxy
|
||||
type HTTPAuthProxy struct {
|
||||
Endpoint string `json:"endpoint"`
|
||||
SkipCertVerify bool `json:"skip_cert_verify"`
|
||||
AlwaysOnBoard bool `json:"always_onboard"`
|
||||
}
|
||||
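The struct above is populated from the three new http_authproxy_* settings. A minimal, hypothetical sketch of that mapping, assuming the config values arrive with the declared Go types (the real config.HTTPAuthProxySetting helper is not shown in this diff):

```go
package config // illustrative placement only

import (
	"github.com/goharbor/harbor/src/common"
	"github.com/goharbor/harbor/src/common/models"
)

// httpAuthProxyFromCfg shows how the new keys in common/const.go map onto
// models.HTTPAuthProxy; the type assertions would need guarding in real code.
func httpAuthProxyFromCfg(cfg map[string]interface{}) *models.HTTPAuthProxy {
	return &models.HTTPAuthProxy{
		Endpoint:       cfg[common.HTTPAuthProxyEndpoint].(string),
		SkipCertVerify: cfg[common.HTTPAuthProxySkipCertVerify].(bool),
		AlwaysOnBoard:  cfg[common.HTTPAuthProxyAlwaysOnboard].(bool),
	}
}
```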
|
||||
// TokenService ...
|
||||
type TokenService struct {
|
||||
URL string `json:"url"`
|
||||
}
|
||||
|
||||
// SystemCfg holds all configurations of system
|
||||
type SystemCfg struct {
|
||||
DomainName string `json:"domain_name"` // Harbor external URL: protocol://host:port
|
||||
Authentication *Authentication `json:"authentication"`
|
||||
Database *Database `json:"database"`
|
||||
TokenService *TokenService `json:"token_service"`
|
||||
Registry *Registry `json:"registry"`
|
||||
Email *Email `json:"email"`
|
||||
VerifyRemoteCert bool `json:"verify_remote_cert"`
|
||||
ProjectCreationRestriction string `json:"project_creation_restriction"`
|
||||
MaxJobWorkers int `json:"max_job_workers"`
|
||||
JobLogDir string `json:"job_log_dir"`
|
||||
InitialAdminPwd string `json:"initial_admin_pwd,omitempty"`
|
||||
TokenExpiration int `json:"token_expiration"` // in minute
|
||||
SecretKey string `json:"secret_key,omitempty"`
|
||||
CfgExpiration int `json:"cfg_expiration"`
|
||||
}
|
||||
*/
|
||||
|
||||
// ConfigEntry ...
|
||||
type ConfigEntry struct {
|
||||
ID int64 `orm:"pk;auto;column(id)" json:"-"`
|
||||
|
@ -29,11 +29,12 @@ type LdapConf struct {
|
||||
|
||||
// LdapGroupConf holds information about ldap group
|
||||
type LdapGroupConf struct {
|
||||
LdapGroupBaseDN string `json:"ldap_group_base_dn,omitempty"`
|
||||
LdapGroupFilter string `json:"ldap_group_filter,omitempty"`
|
||||
LdapGroupNameAttribute string `json:"ldap_group_name_attribute,omitempty"`
|
||||
LdapGroupSearchScope int `json:"ldap_group_search_scope"`
|
||||
LdapGroupAdminDN string `json:"ldap_group_admin_dn,omitempty"`
|
||||
LdapGroupBaseDN string `json:"ldap_group_base_dn,omitempty"`
|
||||
LdapGroupFilter string `json:"ldap_group_filter,omitempty"`
|
||||
LdapGroupNameAttribute string `json:"ldap_group_name_attribute,omitempty"`
|
||||
LdapGroupSearchScope int `json:"ldap_group_search_scope"`
|
||||
LdapGroupAdminDN string `json:"ldap_group_admin_dn,omitempty"`
|
||||
LdapGroupMembershipAttribute string `json:"ldap_group_membership_attribute,omitempty"`
|
||||
}
|
||||
|
||||
// LdapUser ...
|
||||
|
@ -29,6 +29,7 @@ type Robot struct {
|
||||
Name string `orm:"column(name)" json:"name"`
|
||||
Description string `orm:"column(description)" json:"description"`
|
||||
ProjectID int64 `orm:"column(project_id)" json:"project_id"`
|
||||
ExpiresAt int64 `orm:"column(expiresat)" json:"expiresat"`
|
||||
Disabled bool `orm:"column(disabled)" json:"disabled"`
|
||||
CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"`
|
||||
UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time"`
|
||||
|
@ -25,5 +25,9 @@ func (rc RobotClaims) Valid() error {
|
||||
if rc.Access == nil {
|
||||
return errors.New("The access info cannot be nil")
|
||||
}
|
||||
stdErr := rc.StandardClaims.Valid()
|
||||
if stdErr != nil {
|
||||
return stdErr
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -19,14 +19,15 @@ type HToken struct {
|
||||
}
|
||||
|
||||
// New ...
|
||||
func New(tokenID, projectID int64, access []*rbac.Policy) (*HToken, error) {
|
||||
func New(tokenID, projectID, expiresAt int64, access []*rbac.Policy) (*HToken, error) {
|
||||
rClaims := &RobotClaims{
|
||||
TokenID: tokenID,
|
||||
ProjectID: projectID,
|
||||
Access: access,
|
||||
StandardClaims: jwt.StandardClaims{
|
||||
ExpiresAt: time.Now().Add(DefaultOptions.TTL).Unix(),
|
||||
Issuer: DefaultOptions.Issuer,
|
||||
IssuedAt: time.Now().UTC().Unix(),
|
||||
ExpiresAt: expiresAt,
|
||||
Issuer: DefaultOptions().Issuer,
|
||||
},
|
||||
}
|
||||
err := rClaims.Valid()
|
||||
@ -34,13 +35,13 @@ func New(tokenID, projectID int64, access []*rbac.Policy) (*HToken, error) {
|
||||
return nil, err
|
||||
}
|
||||
return &HToken{
|
||||
Token: *jwt.NewWithClaims(DefaultOptions.SignMethod, rClaims),
|
||||
Token: *jwt.NewWithClaims(DefaultOptions().SignMethod, rClaims),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Raw get the Raw string of token
|
||||
func (htk *HToken) Raw() (string, error) {
|
||||
key, err := DefaultOptions.GetKey()
|
||||
key, err := DefaultOptions().GetKey()
|
||||
if err != nil {
|
||||
return "", nil
|
||||
}
|
||||
@ -54,12 +55,12 @@ func (htk *HToken) Raw() (string, error) {
|
||||
|
||||
// ParseWithClaims ...
|
||||
func ParseWithClaims(rawToken string, claims jwt.Claims) (*HToken, error) {
|
||||
key, err := DefaultOptions.GetKey()
|
||||
key, err := DefaultOptions().GetKey()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
token, err := jwt.ParseWithClaims(rawToken, claims, func(token *jwt.Token) (interface{}, error) {
|
||||
if token.Method.Alg() != DefaultOptions.SignMethod.Alg() {
|
||||
if token.Method.Alg() != DefaultOptions().SignMethod.Alg() {
|
||||
return nil, errors.New("invalid signing method")
|
||||
}
|
||||
switch k := key.(type) {
|
||||
@ -75,9 +76,10 @@ func ParseWithClaims(rawToken string, claims jwt.Claims) (*HToken, error) {
|
||||
log.Errorf(fmt.Sprintf("parse token error, %v", err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !token.Valid {
|
||||
log.Errorf(fmt.Sprintf("invalid jwt token, %v", token))
|
||||
return nil, err
|
||||
return nil, errors.New("invalid jwt token")
|
||||
}
|
||||
return &HToken{
|
||||
Token: *token,
|
||||
|
@ -1,25 +1,16 @@
|
||||
package token
|
||||
|
||||
import (
|
||||
"github.com/goharbor/harbor/src/common/rbac"
|
||||
"github.com/goharbor/harbor/src/common/utils/test"
|
||||
"github.com/goharbor/harbor/src/core/config"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/goharbor/harbor/src/common/rbac"
|
||||
"github.com/goharbor/harbor/src/core/config"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
server, err := test.NewAdminserver(nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer server.Close()
|
||||
|
||||
if err := os.Setenv("ADMINSERVER_URL", server.URL); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
if err := config.Init(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@ -40,7 +31,9 @@ func TestNew(t *testing.T) {
|
||||
|
||||
tokenID := int64(123)
|
||||
projectID := int64(321)
|
||||
token, err := New(tokenID, projectID, policies)
|
||||
tokenExpiration := time.Duration(10) * 24 * time.Hour
|
||||
expiresAt := time.Now().UTC().Add(tokenExpiration).Unix()
|
||||
token, err := New(tokenID, projectID, expiresAt, policies)
|
||||
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, token.Header["alg"], "RS256")
|
||||
@ -59,7 +52,9 @@ func TestRaw(t *testing.T) {
|
||||
tokenID := int64(123)
|
||||
projectID := int64(321)
|
||||
|
||||
token, err := New(tokenID, projectID, policies)
|
||||
tokenExpiration := time.Duration(10) * 24 * time.Hour
|
||||
expiresAt := time.Now().UTC().Add(tokenExpiration).Unix()
|
||||
token, err := New(tokenID, projectID, expiresAt, policies)
|
||||
assert.Nil(t, err)
|
||||
|
||||
rawTk, err := token.Raw()
|
||||
|
@ -16,12 +16,6 @@ const (
|
||||
signedMethod = "RS256"
|
||||
)
|
||||
|
||||
var (
|
||||
privateKey = config.TokenPrivateKeyPath()
|
||||
// DefaultOptions ...
|
||||
DefaultOptions = NewOptions()
|
||||
)
|
||||
|
||||
// Options ...
|
||||
type Options struct {
|
||||
SignMethod jwt.SigningMethod
|
||||
@ -31,9 +25,10 @@ type Options struct {
|
||||
Issuer string
|
||||
}
|
||||
|
||||
// NewOptions ...
|
||||
func NewOptions() *Options {
|
||||
privateKey, err := ioutil.ReadFile(privateKey)
|
||||
// DefaultOptions ...
|
||||
func DefaultOptions() *Options {
|
||||
privateKeyFile := config.TokenPrivateKeyPath()
|
||||
privateKey, err := ioutil.ReadFile(privateKeyFile)
|
||||
if err != nil {
|
||||
log.Errorf(fmt.Sprintf("failed to read private key %v", err))
|
||||
return nil
|
||||
|
@ -8,7 +8,7 @@ import (
|
||||
)
|
||||
|
||||
func TestNewOptions(t *testing.T) {
|
||||
defaultOpt := DefaultOptions
|
||||
defaultOpt := DefaultOptions()
|
||||
assert.NotNil(t, defaultOpt)
|
||||
assert.Equal(t, defaultOpt.SignMethod, jwt.GetSigningMethod("RS256"))
|
||||
assert.Equal(t, defaultOpt.Issuer, "harbor-token-issuer")
|
||||
@ -16,7 +16,7 @@ func TestNewOptions(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGetKey(t *testing.T) {
|
||||
defaultOpt := DefaultOptions
|
||||
defaultOpt := DefaultOptions()
|
||||
key, err := defaultOpt.GetKey()
|
||||
assert.Nil(t, err)
|
||||
assert.NotNil(t, key)
|
||||
|
@ -25,7 +25,7 @@ import (
|
||||
)
|
||||
|
||||
func TestParseServerity(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
newAssert := assert.New(t)
|
||||
in := map[string]models.Severity{
|
||||
"negligible": models.SevNone,
|
||||
"whatever": models.SevUnknown,
|
||||
@ -35,39 +35,39 @@ func TestParseServerity(t *testing.T) {
|
||||
"Critical": models.SevHigh,
|
||||
}
|
||||
for k, v := range in {
|
||||
assert.Equal(v, ParseClairSev(k))
|
||||
newAssert.Equal(v, ParseClairSev(k))
|
||||
}
|
||||
}
|
||||
|
||||
func TestTransformVuln(t *testing.T) {
|
||||
var clairVuln = &models.ClairLayerEnvelope{}
|
||||
assert := assert.New(t)
|
||||
newAssert := assert.New(t)
|
||||
empty := []byte(`{"Layer":{"Features":[]}}`)
|
||||
loadVuln(empty, clairVuln)
|
||||
output, o := transformVuln(clairVuln)
|
||||
assert.Equal(0, output.Total)
|
||||
assert.Equal(models.SevNone, o)
|
||||
newAssert.Equal(0, output.Total)
|
||||
newAssert.Equal(models.SevNone, o)
|
||||
_, f, _, ok := runtime.Caller(0)
|
||||
if !ok {
|
||||
panic("Failed to get current directory")
|
||||
}
|
||||
curDir := path.Dir(f)
|
||||
real, err := ioutil.ReadFile(path.Join(curDir, "test/total-12.json"))
|
||||
fileData, err := ioutil.ReadFile(path.Join(curDir, "test/total-12.json"))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
loadVuln(real, clairVuln)
|
||||
loadVuln(fileData, clairVuln)
|
||||
output, o = transformVuln(clairVuln)
|
||||
assert.Equal(12, output.Total)
|
||||
assert.Equal(models.SevHigh, o)
|
||||
newAssert.Equal(12, output.Total)
|
||||
newAssert.Equal(models.SevHigh, o)
|
||||
hit := false
|
||||
for _, s := range output.Summary {
|
||||
if s.Sev == int(models.SevHigh) {
|
||||
assert.Equal(3, s.Count, "There should be 3 components with High severity")
|
||||
newAssert.Equal(3, s.Count, "There should be 3 components with High severity")
|
||||
hit = true
|
||||
}
|
||||
}
|
||||
assert.True(hit, "Not found entry for high severity in summary list")
|
||||
newAssert.True(hit, "Not found entry for high severity in summary list")
|
||||
}
|
||||
|
||||
func loadVuln(input []byte, data *models.ClairLayerEnvelope) {
|
||||
|
@ -117,8 +117,8 @@ func formatURL(ldapURL string) (string, error) {
|
||||
|
||||
if strings.Contains(hostport, ":") {
|
||||
splitHostPort := strings.Split(hostport, ":")
|
||||
port, error := strconv.Atoi(splitHostPort[1])
|
||||
if error != nil {
|
||||
port, err := strconv.Atoi(splitHostPort[1])
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("illegal url port")
|
||||
}
|
||||
if port == 636 {
|
||||
@ -212,6 +212,7 @@ func (session *Session) SearchUser(username string) ([]models.LdapUser, error) {
|
||||
for _, ldapEntry := range result.Entries {
|
||||
var u models.LdapUser
|
||||
groupDNList := []string{}
|
||||
groupAttr := strings.ToLower(session.ldapGroupConfig.LdapGroupMembershipAttribute)
|
||||
for _, attr := range ldapEntry.Attributes {
|
||||
// OpenLDAP sometimes contains a leading space in the username
|
||||
val := strings.TrimSpace(attr.Values[0])
|
||||
@ -227,7 +228,7 @@ func (session *Session) SearchUser(username string) ([]models.LdapUser, error) {
|
||||
u.Email = val
|
||||
case "email":
|
||||
u.Email = val
|
||||
case "memberof":
|
||||
case groupAttr:
|
||||
for _, dnItem := range attr.Values {
|
||||
groupDNList = append(groupDNList, strings.TrimSpace(dnItem))
|
||||
log.Debugf("Found memberof %v", dnItem)
|
||||
@ -281,12 +282,18 @@ func (session *Session) Open() error {
|
||||
|
||||
// SearchLdap searches ldap with the provided filter
|
||||
func (session *Session) SearchLdap(filter string) (*goldap.SearchResult, error) {
|
||||
attributes := []string{"uid", "cn", "mail", "email", "memberof"}
|
||||
attributes := []string{"uid", "cn", "mail", "email"}
|
||||
lowerUID := strings.ToLower(session.ldapConfig.LdapUID)
|
||||
|
||||
if lowerUID != "uid" && lowerUID != "cn" && lowerUID != "mail" && lowerUID != "email" {
|
||||
attributes = append(attributes, session.ldapConfig.LdapUID)
|
||||
}
|
||||
|
||||
// Add the Group membership attribute
|
||||
groupAttr := strings.TrimSpace(session.ldapGroupConfig.LdapGroupMembershipAttribute)
|
||||
log.Debugf("Membership attribute: %s\n", groupAttr)
|
||||
attributes = append(attributes, groupAttr)
|
||||
|
||||
return session.SearchLdapAttribute(session.ldapConfig.LdapBaseDn, filter, attributes)
|
||||
}
|
||||
|
||||
|
@ -15,14 +15,10 @@
|
||||
package test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
|
||||
"github.com/goharbor/harbor/src/common"
|
||||
)
|
||||
|
||||
var adminServerDefaultConfig = map[string]interface{}{
|
||||
var defaultConfig = map[string]interface{}{
|
||||
common.ExtEndpoint: "https://host01.com",
|
||||
common.AUTHMode: common.DBAuth,
|
||||
common.DatabaseType: "postgresql",
|
||||
@ -77,54 +73,7 @@ var adminServerDefaultConfig = map[string]interface{}{
|
||||
common.NotaryURL: "http://notary-server:4443",
|
||||
}
|
||||
|
||||
// NewAdminserver returns a mock admin server
|
||||
func NewAdminserver(config map[string]interface{}) (*httptest.Server, error) {
|
||||
m := []*RequestHandlerMapping{}
|
||||
if config == nil {
|
||||
config = adminServerDefaultConfig
|
||||
} else {
|
||||
for k, v := range adminServerDefaultConfig {
|
||||
if _, ok := config[k]; !ok {
|
||||
config[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
b, err := json.Marshal(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp := &Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: b,
|
||||
}
|
||||
|
||||
m = append(m, &RequestHandlerMapping{
|
||||
Method: "GET",
|
||||
Pattern: "/api/configs",
|
||||
Handler: Handler(resp),
|
||||
})
|
||||
|
||||
m = append(m, &RequestHandlerMapping{
|
||||
Method: "PUT",
|
||||
Pattern: "/api/configurations",
|
||||
Handler: Handler(&Response{
|
||||
StatusCode: http.StatusOK,
|
||||
}),
|
||||
})
|
||||
|
||||
m = append(m, &RequestHandlerMapping{
|
||||
Method: "POST",
|
||||
Pattern: "/api/configurations/reset",
|
||||
Handler: Handler(&Response{
|
||||
StatusCode: http.StatusOK,
|
||||
}),
|
||||
})
|
||||
|
||||
return NewServer(m...), nil
|
||||
}
|
||||
|
||||
// GetDefaultConfigMap returns the default config map for easier modification.
|
||||
func GetDefaultConfigMap() map[string]interface{} {
|
||||
return adminServerDefaultConfig
|
||||
return defaultConfig
|
||||
}
|
@ -100,6 +100,7 @@ func init() {
|
||||
beego.Router("/api/projects/:id", &ProjectAPI{}, "delete:Delete;get:Get;put:Put")
|
||||
beego.Router("/api/users/:id", &UserAPI{}, "get:Get")
|
||||
beego.Router("/api/users", &UserAPI{}, "get:List;post:Post;delete:Delete;put:Put")
|
||||
beego.Router("/api/users/search", &UserAPI{}, "get:Search")
|
||||
beego.Router("/api/users/:id([0-9]+)/password", &UserAPI{}, "put:ChangePassword")
|
||||
beego.Router("/api/users/:id/permissions", &UserAPI{}, "get:ListUserPermissions")
|
||||
beego.Router("/api/users/:id/sysadmin", &UserAPI{}, "put:ToggleUserAdminRole")
|
||||
@ -821,6 +822,25 @@ func (a testapi) UsersGet(userName string, authInfo usrInfo) (int, []apilib.User
|
||||
return httpStatusCode, successPayLoad, err
|
||||
}
|
||||
|
||||
// Search registered users of Harbor.
|
||||
func (a testapi) UsersSearch(userName string, authInfo ...usrInfo) (int, []apilib.UserSearch, error) {
|
||||
_sling := sling.New().Get(a.basePath)
|
||||
// create path and map variables
|
||||
path := "/api/users/search"
|
||||
_sling = _sling.Path(path)
|
||||
// body params
|
||||
type QueryParams struct {
|
||||
UserName string `url:"username, omitempty"`
|
||||
}
|
||||
_sling = _sling.QueryStruct(&QueryParams{UserName: userName})
|
||||
httpStatusCode, body, err := request(_sling, jsonAcceptHeader, authInfo...)
|
||||
var successPayLoad []apilib.UserSearch
|
||||
if 200 == httpStatusCode && nil == err {
|
||||
err = json.Unmarshal(body, &successPayLoad)
|
||||
}
|
||||
return httpStatusCode, successPayLoad, err
|
||||
}
|
||||
|
||||
// Get registered users by userid.
|
||||
func (a testapi) UsersGetByID(userName string, authInfo usrInfo, userID int) (int, apilib.User, error) {
|
||||
_sling := sling.New().Get(a.basePath)
|
||||
|
@ -22,16 +22,20 @@ import (
|
||||
"github.com/astaxie/beego/validation"
|
||||
"github.com/goharbor/harbor/src/common/job"
|
||||
"github.com/goharbor/harbor/src/common/job/models"
|
||||
"github.com/goharbor/harbor/src/common/utils"
|
||||
"github.com/goharbor/harbor/src/common/utils/log"
|
||||
"github.com/goharbor/harbor/src/core/config"
|
||||
"github.com/robfig/cron"
|
||||
)
|
||||
|
||||
const (
|
||||
// ScheduleHourly : 'Hourly'
|
||||
ScheduleHourly = "Hourly"
|
||||
// ScheduleDaily : 'Daily'
|
||||
ScheduleDaily = "Daily"
|
||||
// ScheduleWeekly : 'Weekly'
|
||||
ScheduleWeekly = "Weekly"
|
||||
// ScheduleCustom : 'Custom'
|
||||
ScheduleCustom = "Custom"
|
||||
// ScheduleManual : 'Manual'
|
||||
ScheduleManual = "Manual"
|
||||
// ScheduleNone : 'None'
|
||||
@ -48,12 +52,10 @@ type GCReq struct {
|
||||
|
||||
// ScheduleParam defines the parameter of schedule trigger
|
||||
type ScheduleParam struct {
|
||||
// Daily, Weekly, Manual, None
|
||||
// Daily, Weekly, Custom, Manual, None
|
||||
Type string `json:"type"`
|
||||
// Optional, only used when type is 'weekly'
|
||||
Weekday int8 `json:"weekday"`
|
||||
// The time offset with the UTC 00:00 in seconds
|
||||
Offtime int64 `json:"offtime"`
|
||||
// The cron string of scheduled job
|
||||
Cron string `json:"cron"`
|
||||
}
|
||||
|
||||
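With Offtime and Weekday gone, every periodic schedule is now expressed as a 6-field cron string (seconds first) and checked with github.com/robfig/cron, as the Valid() change below shows. A standalone sketch of that validation, using the same spec as the updated tests:

```go
package main

import (
	"fmt"

	"github.com/robfig/cron"
)

func main() {
	// "20 3 0 * * *" is the spec used by the updated tests: second 20,
	// minute 3, hour 0 -> 00:03:20 every day.
	specs := []string{"20 3 0 * * *", "0 0 * * * *", "not-a-cron"}
	for _, spec := range specs {
		if _, err := cron.Parse(spec); err != nil {
			fmt.Printf("invalid cron %q: %v\n", spec, err)
			continue
		}
		fmt.Printf("valid cron %q\n", spec)
	}
}
```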
// GCRep holds the response of query gc
|
||||
@ -75,9 +77,9 @@ func (gr *GCReq) Valid(v *validation.Validation) {
|
||||
return
|
||||
}
|
||||
switch gr.Schedule.Type {
|
||||
case ScheduleDaily, ScheduleWeekly:
|
||||
if gr.Schedule.Offtime < 0 || gr.Schedule.Offtime > 3600*24 {
|
||||
v.SetError("offtime", fmt.Sprintf("Invalid schedule trigger parameter offtime: %d", gr.Schedule.Offtime))
|
||||
case ScheduleHourly, ScheduleDaily, ScheduleWeekly, ScheduleCustom:
|
||||
if _, err := cron.Parse(gr.Schedule.Cron); err != nil {
|
||||
v.SetError("cron", fmt.Sprintf("Invalid schedule trigger parameter cron: %s", gr.Schedule.Cron))
|
||||
}
|
||||
case ScheduleManual, ScheduleNone:
|
||||
default:
|
||||
@ -85,26 +87,15 @@ func (gr *GCReq) Valid(v *validation.Validation) {
|
||||
}
|
||||
}
|
||||
|
||||
// ToJob converts request to a job reconiged by job service.
|
||||
func (gr *GCReq) ToJob() (*models.JobData, error) {
|
||||
// ToJob converts request to a job recognized by job service.
|
||||
func (gr *GCReq) ToJob() *models.JobData {
|
||||
metadata := &models.JobMetadata{
|
||||
JobKind: gr.JobKind(),
|
||||
Cron: gr.Schedule.Cron,
|
||||
// GC job must be unique ...
|
||||
IsUnique: true,
|
||||
}
|
||||
|
||||
switch gr.Schedule.Type {
|
||||
case ScheduleDaily:
|
||||
h, m, s := utils.ParseOfftime(gr.Schedule.Offtime)
|
||||
metadata.Cron = fmt.Sprintf("%d %d %d * * *", s, m, h)
|
||||
case ScheduleWeekly:
|
||||
h, m, s := utils.ParseOfftime(gr.Schedule.Offtime)
|
||||
metadata.Cron = fmt.Sprintf("%d %d %d * * %d", s, m, h, gr.Schedule.Weekday%7)
|
||||
case ScheduleManual, ScheduleNone:
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported schedule trigger type: %s", gr.Schedule.Type)
|
||||
}
|
||||
|
||||
jobData := &models.JobData{
|
||||
Name: job.ImageGC,
|
||||
Parameters: gr.Parameters,
|
||||
@ -112,7 +103,7 @@ func (gr *GCReq) ToJob() (*models.JobData, error) {
|
||||
StatusHook: fmt.Sprintf("%s/service/notifications/jobs/adminjob/%d",
|
||||
config.InternalCoreURL(), gr.ID),
|
||||
}
|
||||
return jobData, nil
|
||||
return jobData
|
||||
}
|
||||
|
||||
// IsPeriodic ...
|
||||
@ -123,7 +114,7 @@ func (gr *GCReq) IsPeriodic() bool {
|
||||
// JobKind ...
|
||||
func (gr *GCReq) JobKind() string {
|
||||
switch gr.Schedule.Type {
|
||||
case ScheduleDaily, ScheduleWeekly:
|
||||
case ScheduleHourly, ScheduleDaily, ScheduleWeekly, ScheduleCustom:
|
||||
return job.JobKindPeriodic
|
||||
case ScheduleManual:
|
||||
return job.JobKindGeneric
|
||||
|
@ -41,16 +41,15 @@ func TestMain(m *testing.M) {
|
||||
|
||||
func TestToJob(t *testing.T) {
|
||||
schedule := &ScheduleParam{
|
||||
Type: "Daily",
|
||||
Offtime: 200,
|
||||
Type: "Daily",
|
||||
Cron: "20 3 0 * * *",
|
||||
}
|
||||
|
||||
adminjob := &GCReq{
|
||||
Schedule: schedule,
|
||||
}
|
||||
|
||||
job, err := adminjob.ToJob()
|
||||
assert.Nil(t, err)
|
||||
job := adminjob.ToJob()
|
||||
assert.Equal(t, job.Name, "IMAGE_GC")
|
||||
assert.Equal(t, job.Metadata.JobKind, common_job.JobKindPeriodic)
|
||||
assert.Equal(t, job.Metadata.Cron, "20 3 0 * * *")
|
||||
@ -65,29 +64,15 @@ func TestToJobManual(t *testing.T) {
|
||||
Schedule: schedule,
|
||||
}
|
||||
|
||||
job, err := adminjob.ToJob()
|
||||
assert.Nil(t, err)
|
||||
job := adminjob.ToJob()
|
||||
assert.Equal(t, job.Name, "IMAGE_GC")
|
||||
assert.Equal(t, job.Metadata.JobKind, common_job.JobKindGeneric)
|
||||
}
|
||||
|
||||
func TestToJobErr(t *testing.T) {
|
||||
schedule := &ScheduleParam{
|
||||
Type: "test",
|
||||
}
|
||||
|
||||
adminjob := &GCReq{
|
||||
Schedule: schedule,
|
||||
}
|
||||
|
||||
_, err := adminjob.ToJob()
|
||||
assert.NotNil(t, err)
|
||||
}
|
||||
|
||||
func TestIsPeriodic(t *testing.T) {
|
||||
schedule := &ScheduleParam{
|
||||
Type: "Daily",
|
||||
Offtime: 200,
|
||||
Type: "Daily",
|
||||
Cron: "20 3 0 * * *",
|
||||
}
|
||||
|
||||
adminjob := &GCReq{
|
||||
@ -100,8 +85,8 @@ func TestIsPeriodic(t *testing.T) {
|
||||
|
||||
func TestJobKind(t *testing.T) {
|
||||
schedule := &ScheduleParam{
|
||||
Type: "Daily",
|
||||
Offtime: 200,
|
||||
Type: "Daily",
|
||||
Cron: "20 3 0 * * *",
|
||||
}
|
||||
adminjob := &GCReq{
|
||||
Schedule: schedule,
|
||||
@ -121,12 +106,12 @@ func TestJobKind(t *testing.T) {
|
||||
|
||||
func TestCronString(t *testing.T) {
|
||||
schedule := &ScheduleParam{
|
||||
Type: "Daily",
|
||||
Offtime: 102,
|
||||
Type: "Daily",
|
||||
Cron: "20 3 0 * * *",
|
||||
}
|
||||
adminjob := &GCReq{
|
||||
Schedule: schedule,
|
||||
}
|
||||
cronStr := adminjob.CronString()
|
||||
assert.True(t, strings.EqualFold(cronStr, "{\"type\":\"Daily\",\"Weekday\":0,\"Offtime\":102}"))
|
||||
assert.True(t, strings.EqualFold(cronStr, "{\"type\":\"Daily\",\"Cron\":\"20 3 0 * * *\"}"))
|
||||
}
|
||||
|
@ -22,6 +22,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/goharbor/harbor/src/common"
|
||||
"github.com/goharbor/harbor/src/common/dao"
|
||||
"github.com/goharbor/harbor/src/common/dao/project"
|
||||
"github.com/goharbor/harbor/src/common/models"
|
||||
"github.com/goharbor/harbor/src/common/rbac"
|
||||
@ -220,11 +221,20 @@ func AddProjectMember(projectID int64, request models.MemberReq) (int, error) {
|
||||
member.EntityID = request.MemberGroup.ID
|
||||
member.EntityType = common.GroupMember
|
||||
} else if len(request.MemberUser.Username) > 0 {
|
||||
var userID int
|
||||
member.EntityType = common.UserMember
|
||||
userID, err := auth.SearchAndOnBoardUser(request.MemberUser.Username)
|
||||
u, err := dao.GetUser(models.User{Username: request.MemberUser.Username})
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if u != nil {
|
||||
userID = u.UserID
|
||||
} else {
|
||||
userID, err = auth.SearchAndOnBoardUser(request.MemberUser.Username)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
member.EntityID = userID
|
||||
} else if len(request.MemberGroup.LdapGroupDN) > 0 {
|
||||
|
||||
|
@ -214,7 +214,7 @@ func (gc *GCAPI) GetLog() {
|
||||
|
||||
// submitJob submits a job to job service per request
|
||||
func (gc *GCAPI) submitJob(gr *models.GCReq) {
|
||||
// cannot post multiple schdule for GC job.
|
||||
// cannot post multiple schedules for the GC job.
|
||||
if gr.IsPeriodic() {
|
||||
jobs, err := dao.GetAdminJobs(&common_models.AdminJobQuery{
|
||||
Name: common_job.ImageGC,
|
||||
@ -243,7 +243,7 @@ func (gc *GCAPI) submitJob(gr *models.GCReq) {
|
||||
gr.Parameters = map[string]interface{}{
|
||||
"redis_url_reg": os.Getenv("_REDIS_URL_REG"),
|
||||
}
|
||||
job, err := gr.ToJob()
|
||||
job := gr.ToJob()
|
||||
if err != nil {
|
||||
gc.HandleInternalServerError(fmt.Sprintf("%v", err))
|
||||
return
|
||||
|
@ -54,7 +54,7 @@ func TestConvertToGCRep(t *testing.T) {
|
||||
ID: 1,
|
||||
Name: "IMAGE_GC",
|
||||
Kind: "Generic",
|
||||
Cron: "{\"Type\":\"Manual\",\"Weekday\":0,\"Offtime\":0}",
|
||||
Cron: "{\"Type\":\"Daily\",\"Cron\":\"20 3 0 * * *\"}",
|
||||
Status: "pending",
|
||||
Deleted: false,
|
||||
},
|
||||
@ -63,9 +63,8 @@ func TestConvertToGCRep(t *testing.T) {
|
||||
Name: "IMAGE_GC",
|
||||
Kind: "Generic",
|
||||
Schedule: &api_modes.ScheduleParam{
|
||||
Type: "Manual",
|
||||
Weekday: 0,
|
||||
Offtime: 0,
|
||||
Type: "Daily",
|
||||
Cron: "20 3 0 * * *",
|
||||
},
|
||||
Status: "pending",
|
||||
Deleted: false,
|
||||
|
@ -16,14 +16,16 @@ package api
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/goharbor/harbor/src/common"
|
||||
"github.com/goharbor/harbor/src/common/dao"
|
||||
"github.com/goharbor/harbor/src/common/models"
|
||||
"github.com/goharbor/harbor/src/common/rbac"
|
||||
"github.com/goharbor/harbor/src/common/token"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/goharbor/harbor/src/core/config"
|
||||
"time"
|
||||
)
|
||||
|
||||
// RobotAPI ...
|
||||
@ -104,6 +106,9 @@ func (r *RobotAPI) Post() {
|
||||
}
|
||||
|
||||
var robotReq models.RobotReq
|
||||
// Token duration in minutes
|
||||
tokenDuration := time.Duration(config.RobotTokenDuration()) * time.Minute
|
||||
expiresAt := time.Now().UTC().Add(tokenDuration).Unix()
|
||||
r.DecodeJSONReq(&robotReq)
|
||||
createdName := common.RobotPrefix + robotReq.Name
|
||||
|
||||
@ -112,6 +117,7 @@ func (r *RobotAPI) Post() {
|
||||
Name: createdName,
|
||||
Description: robotReq.Description,
|
||||
ProjectID: r.project.ProjectID,
|
||||
ExpiresAt: expiresAt,
|
||||
}
|
||||
id, err := dao.AddRobot(&robot)
|
||||
if err != nil {
|
||||
@ -125,7 +131,7 @@ func (r *RobotAPI) Post() {
|
||||
|
||||
// generate the token, and return it with response data.
|
||||
// token is not stored in the database.
|
||||
jwtToken, err := token.New(id, r.project.ProjectID, robotReq.Access)
|
||||
jwtToken, err := token.New(id, r.project.ProjectID, expiresAt, robotReq.Access)
|
||||
if err != nil {
|
||||
r.HandleInternalServerError(fmt.Sprintf("failed to valid parameters to generate token for robot account, %v", err))
|
||||
err := dao.DeleteRobot(id)
|
||||
|
@ -33,7 +33,7 @@ type chartSearchHandler func(string, []string) ([]*search.Result, error)
|
||||
|
||||
var searchHandler chartSearchHandler
|
||||
|
||||
// SearchAPI handles requesst to /api/search
|
||||
// SearchAPI handles request to /api/search
|
||||
type SearchAPI struct {
|
||||
BaseController
|
||||
}
|
||||
@ -41,7 +41,7 @@ type SearchAPI struct {
|
||||
type searchResult struct {
|
||||
Project []*models.Project `json:"project"`
|
||||
Repository []map[string]interface{} `json:"repository"`
|
||||
Chart []*search.Result
|
||||
Chart *[]*search.Result `json:"chart,omitempty"`
|
||||
}
|
||||
|
||||
// Get ...
|
||||
@ -141,8 +141,8 @@ func (s *SearchAPI) Get() {
|
||||
log.Errorf("failed to filter charts: %v", err)
|
||||
s.CustomAbort(http.StatusInternalServerError, err.Error())
|
||||
}
|
||||
result.Chart = &chartResults
|
||||
|
||||
result.Chart = chartResults
|
||||
}
|
||||
|
||||
s.Data["json"] = result
|
||||
|
@ -201,8 +201,8 @@ func TestSearch(t *testing.T) {
|
||||
credential: sysAdmin,
|
||||
}, result)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, 1, len(result.Chart))
|
||||
require.Equal(t, "library/harbor", result.Chart[0].Name)
|
||||
require.Equal(t, 1, len(*(result.Chart)))
|
||||
require.Equal(t, "library/harbor", (*result.Chart)[0].Name)
|
||||
|
||||
// Restore chart search handler
|
||||
searchHandler = nil
|
||||
|
@ -46,6 +46,11 @@ type passwordReq struct {
|
||||
NewPassword string `json:"new_password"`
|
||||
}
|
||||
|
||||
type userSearch struct {
|
||||
UserID int `json:"user_id"`
|
||||
Username string `json:"username"`
|
||||
}
|
||||
|
||||
// Prepare validates the URL and params
|
||||
func (ua *UserAPI) Prepare() {
|
||||
ua.BaseController.Prepare()
|
||||
@ -166,6 +171,40 @@ func (ua *UserAPI) List() {
|
||||
ua.ServeJSON()
|
||||
}
|
||||
|
||||
// Search ...
|
||||
func (ua *UserAPI) Search() {
|
||||
page, size := ua.GetPaginationParams()
|
||||
query := &models.UserQuery{
|
||||
Username: ua.GetString("username"),
|
||||
Email: ua.GetString("email"),
|
||||
Pagination: &models.Pagination{
|
||||
Page: page,
|
||||
Size: size,
|
||||
},
|
||||
}
|
||||
|
||||
total, err := dao.GetTotalOfUsers(query)
|
||||
if err != nil {
|
||||
ua.HandleInternalServerError(fmt.Sprintf("failed to get total of users: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
users, err := dao.ListUsers(query)
|
||||
if err != nil {
|
||||
ua.HandleInternalServerError(fmt.Sprintf("failed to get users: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
var userSearches []userSearch
|
||||
for _, user := range users {
|
||||
userSearches = append(userSearches, userSearch{UserID: user.UserID, Username: user.Username})
|
||||
}
|
||||
|
||||
ua.SetPaginationHeader(total, page, size)
|
||||
ua.Data["json"] = userSearches
|
||||
ua.ServeJSON()
|
||||
}
|
||||
|
||||
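For reference, a minimal client-side sketch of the endpoint registered above (GET /api/users/search). The host and credentials are placeholders, and the response shape mirrors the userSearch struct:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// userSearchEntry mirrors the userSearch JSON emitted by UserAPI.Search.
type userSearchEntry struct {
	UserID   int    `json:"user_id"`
	Username string `json:"username"`
}

func main() {
	req, err := http.NewRequest(http.MethodGet,
		"https://harbor.example.com/api/users/search?username=testUser0002", nil)
	if err != nil {
		panic(err)
	}
	// Any authenticated (non-anonymous) user may call the endpoint.
	req.SetBasicAuth("testUser0002", "testUser0002")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	var entries []userSearchEntry
	if err := json.NewDecoder(resp.Body).Decode(&entries); err != nil {
		panic(err)
	}
	for _, e := range entries {
		fmt.Printf("%d\t%s\n", e.UserID, e.Username)
	}
}
```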
// Put ...
|
||||
func (ua *UserAPI) Put() {
|
||||
if !ua.modifiable() {
|
||||
|
@ -208,6 +208,35 @@ func TestUsersGet(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestUsersSearch(t *testing.T) {
|
||||
|
||||
fmt.Println("Testing User Search")
|
||||
assert := assert.New(t)
|
||||
apiTest := newHarborAPI()
|
||||
|
||||
testUser0002.Username = "testUser0002"
|
||||
// case 1: Search user2 without auth, expect 401
|
||||
|
||||
testUser0002Auth = &usrInfo{"testUser0002", "testUser0002"}
|
||||
code, users, err := apiTest.UsersSearch(testUser0002.Username)
|
||||
if err != nil {
|
||||
t.Error("Error occurred while searching users", err.Error())
|
||||
t.Log(err)
|
||||
} else {
|
||||
assert.Equal(401, code, "Search users status should be 401")
|
||||
}
|
||||
// case 2: Search user2 with common auth, expect 200
|
||||
code, users, err = apiTest.UsersSearch(testUser0002.Username, *testUser0002Auth)
|
||||
if err != nil {
|
||||
t.Error("Error occurred while searching users", err.Error())
|
||||
t.Log(err)
|
||||
} else {
|
||||
assert.Equal(200, code, "Search users status should be 200")
|
||||
assert.Equal(1, len(users), "Search users record should be 1 ")
|
||||
testUser0002ID = users[0].UserID
|
||||
}
|
||||
}
|
||||
|
||||
func TestUsersGetByID(t *testing.T) {
|
||||
|
||||
fmt.Println("Testing User GetByID")
|
||||
|
@ -22,27 +22,38 @@ import (
|
||||
"github.com/goharbor/harbor/src/common/models"
|
||||
"github.com/goharbor/harbor/src/common/utils/log"
|
||||
"github.com/goharbor/harbor/src/core/auth"
|
||||
"github.com/goharbor/harbor/src/core/config"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const refreshDuration = 5 * time.Second
|
||||
const userEntryComment = "By Authproxy"
|
||||
|
||||
// Auth implements the HTTP authenticator with the required attributes.
|
||||
// The attribute Endpoint is the HTTP endpoint to which the POST request should be issued for authentication
|
||||
type Auth struct {
|
||||
auth.DefaultAuthenticateHelper
|
||||
sync.Mutex
|
||||
Endpoint string
|
||||
SkipCertVerify bool
|
||||
AlwaysOnboard bool
|
||||
client *http.Client
|
||||
Endpoint string
|
||||
SkipCertVerify bool
|
||||
AlwaysOnboard bool
|
||||
settingTimeStamp time.Time
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
// Authenticate issues http POST request to Endpoint if it returns 200 the authentication is considered success.
|
||||
func (a *Auth) Authenticate(m models.AuthModel) (*models.User, error) {
|
||||
a.ensure()
|
||||
err := a.ensure()
|
||||
if err != nil {
|
||||
if a.Endpoint == "" {
|
||||
return nil, fmt.Errorf("failed to initialize HTTP Auth Proxy Authenticator, error: %v", err)
|
||||
}
|
||||
log.Warningf("Failed to refresh configuration for HTTP Auth Proxy Authenticator, error: %v, old settings will be used", err)
|
||||
}
|
||||
req, err := http.NewRequest(http.MethodPost, a.Endpoint, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to send request, error: %v", err)
|
||||
@ -109,7 +120,7 @@ func (a *Auth) fillInModel(u *models.User) error {
|
||||
}
|
||||
u.Realname = u.Username
|
||||
u.Password = "1234567ab"
|
||||
u.Comment = "By Authproxy"
|
||||
u.Comment = userEntryComment
|
||||
if strings.Contains(u.Username, "@") {
|
||||
u.Email = u.Username
|
||||
} else {
|
||||
@ -118,13 +129,17 @@ func (a *Auth) fillInModel(u *models.User) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *Auth) ensure() {
|
||||
func (a *Auth) ensure() error {
|
||||
a.Lock()
|
||||
defer a.Unlock()
|
||||
if a.Endpoint == "" {
|
||||
a.Endpoint = os.Getenv("AUTHPROXY_ENDPOINT")
|
||||
a.SkipCertVerify = strings.EqualFold(os.Getenv("AUTHPROXY_SKIP_CERT_VERIFY"), "true")
|
||||
a.AlwaysOnboard = strings.EqualFold(os.Getenv("AUTHPROXY_ALWAYS_ONBOARD"), "true")
|
||||
if time.Now().Sub(a.settingTimeStamp) >= refreshDuration {
|
||||
setting, err := config.HTTPAuthProxySetting()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
a.Endpoint = setting.Endpoint
|
||||
a.SkipCertVerify = setting.SkipCertVerify
|
||||
a.AlwaysOnboard = setting.AlwaysOnBoard
|
||||
}
|
||||
if a.client == nil {
|
||||
tr := &http.Transport{
|
||||
@ -136,6 +151,7 @@ func (a *Auth) ensure() {
|
||||
Transport: tr,
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
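The reworked `ensure` replaces the one-time environment-variable bootstrap with a time-based refresh: settings are re-read from the configuration manager only when the cached copy is older than `refreshDuration`, under the struct's mutex, and a failed refresh keeps the previous values (as the caller in `Authenticate` shows). A minimal sketch of the same refresh-on-read pattern, with `loadSettings` standing in for `config.HTTPAuthProxySetting` (hypothetical names, not Harbor's API):

```go
package settingscache

import (
	"sync"
	"time"
)

type settings struct{ Endpoint string }

type cachedSettings struct {
	sync.Mutex
	current      settings
	loadedAt     time.Time
	ttl          time.Duration
	loadSettings func() (settings, error) // e.g. reads from a config manager
}

// ensure refreshes the cached settings when they are older than ttl.
// On failure it returns the error and keeps the previously loaded values.
func (c *cachedSettings) ensure() error {
	c.Lock()
	defer c.Unlock()
	if time.Since(c.loadedAt) < c.ttl {
		return nil // cached copy is still fresh
	}
	s, err := c.loadSettings()
	if err != nil {
		return err // caller decides whether stale settings are acceptable
	}
	c.current = s
	c.loadedAt = time.Now()
	return nil
}
```

This is why the test below seeds `settingTimeStamp: time.Now()`: a fresh timestamp keeps `ensure` from touching the real configuration manager during unit tests.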
@@ -15,13 +15,16 @@
package authproxy

import (
    "github.com/goharbor/harbor/src/common/dao"
    "github.com/goharbor/harbor/src/common/models"
    cut "github.com/goharbor/harbor/src/common/utils/test"
    "github.com/goharbor/harbor/src/core/auth"
    "github.com/goharbor/harbor/src/core/auth/authproxy/test"
    "github.com/stretchr/testify/assert"
    "net/http/httptest"
    "os"
    "testing"
    "time"
)

var mockSvr *httptest.Server

@@ -30,13 +33,22 @@ var pwd = "1234567ab"
var cmt = "By Authproxy"

func TestMain(m *testing.M) {
    cut.InitDatabaseFromEnv()
    if err := dao.ClearHTTPAuthProxyUsers(); err != nil {
        panic(err)
    }
    mockSvr = test.NewMockServer(map[string]string{"jt": "pp", "Admin@vsphere.local": "Admin!23"})
    defer mockSvr.Close()
    a = &Auth{
        Endpoint:       mockSvr.URL + "/test/login",
        SkipCertVerify: true,
        // So it won't require mocking the cfgManager
        settingTimeStamp: time.Now(),
    }
    rc := m.Run()
    if err := dao.ClearHTTPAuthProxyUsers(); err != nil {
        panic(err)
    }
    if rc != 0 {
        os.Exit(rc)
    }

@@ -104,7 +116,6 @@ func TestAuth_Authenticate(t *testing.T) {
    }
}

/* TODO: Enable this case after adminserver refactor is merged.
func TestAuth_PostAuthenticate(t *testing.T) {
    type tc struct {
        input *models.User

@@ -120,7 +131,7 @@ func TestAuth_PostAuthenticate(t *testing.T) {
            Email:    "jt@placeholder.com",
            Realname: "jt",
            Password: pwd,
            Comment:  fmt.Sprintf(cmtTmpl, mockSvr.URL+"/test/login"),
            Comment:  userEntryComment,
        },
    },
    {

@@ -129,16 +140,19 @@ func TestAuth_PostAuthenticate(t *testing.T) {
        },
        expect: models.User{
            Username: "Admin@vsphere.local",
            Email:    "jt@placeholder.com",
            Email:    "Admin@vsphere.local",
            Realname: "Admin@vsphere.local",
            Password: pwd,
            Comment:  fmt.Sprintf(cmtTmpl, mockSvr.URL+"/test/login"),
            Comment:  userEntryComment,
        },
    },
}
for _, c := range suite {
    a.PostAuthenticate(c.input)
    assert.Equal(t, c.expect, *c.input)
    assert.Equal(t, c.expect.Username, c.input.Username)
    assert.Equal(t, c.expect.Email, c.input.Email)
    assert.Equal(t, c.expect.Realname, c.input.Realname)
    assert.Equal(t, c.expect.Comment, c.input.Comment)
}

}
*/
@@ -30,38 +30,23 @@ import (
)

var adminServerTestConfig = map[string]interface{}{
    common.ExtEndpoint: "host01.com",
    common.AUTHMode: "db_auth",
    common.DatabaseType: "postgresql",
    common.PostGreSQLHOST: "127.0.0.1",
    common.PostGreSQLPort: 5432,
    common.PostGreSQLUsername: "postgres",
    common.PostGreSQLPassword: "root123",
    common.PostGreSQLDatabase: "registry",
    // config.SelfRegistration: true,
    common.LDAPURL: "ldap://127.0.0.1",
    common.LDAPSearchDN: "cn=admin,dc=example,dc=com",
    common.LDAPSearchPwd: "admin",
    common.LDAPBaseDN: "dc=example,dc=com",
    common.LDAPUID: "uid",
    common.LDAPFilter: "",
    common.LDAPScope: 3,
    common.LDAPTimeout: 30,
    // config.TokenServiceURL: "",
    // config.RegistryURL: "",
    // config.EmailHost: "",
    // config.EmailPort: 25,
    // config.EmailUsername: "",
    // config.EmailPassword: "password",
    // config.EmailFrom: "from",
    // config.EmailSSL: true,
    // config.EmailIdentity: "",
    // config.ProjectCreationRestriction: config.ProCrtRestrAdmOnly,
    // config.VerifyRemoteCert: false,
    // config.MaxJobWorkers: 3,
    // config.TokenExpiration: 30,
    common.CfgExpiration: 5,
    // config.JobLogDir: "/var/log/jobs",
    common.ExtEndpoint: "host01.com",
    common.AUTHMode: "db_auth",
    common.DatabaseType: "postgresql",
    common.PostGreSQLHOST: "127.0.0.1",
    common.PostGreSQLPort: 5432,
    common.PostGreSQLUsername: "postgres",
    common.PostGreSQLPassword: "root123",
    common.PostGreSQLDatabase: "registry",
    common.LDAPURL: "ldap://127.0.0.1",
    common.LDAPSearchDN: "cn=admin,dc=example,dc=com",
    common.LDAPSearchPwd: "admin",
    common.LDAPBaseDN: "dc=example,dc=com",
    common.LDAPUID: "uid",
    common.LDAPFilter: "",
    common.LDAPScope: 3,
    common.LDAPTimeout: 30,
    common.CfgExpiration: 5,
    common.AdminInitialPassword: "password",
}

@@ -32,7 +32,7 @@ import (
    coreConfig "github.com/goharbor/harbor/src/core/config"
)

var adminServerLdapTestConfig = map[string]interface{}{
var ldapTestConfig = map[string]interface{}{
    common.ExtEndpoint: "host01.com",
    common.AUTHMode: "ldap_auth",
    common.DatabaseType: "postgresql",

@@ -42,29 +42,15 @@ var adminServerLdapTestConfig = map[string]interface{}{
    common.PostGreSQLPassword: "root123",
    common.PostGreSQLDatabase: "registry",
    // config.SelfRegistration: true,
    common.LDAPURL: "ldap://127.0.0.1",
    common.LDAPSearchDN: "cn=admin,dc=example,dc=com",
    common.LDAPSearchPwd: "admin",
    common.LDAPBaseDN: "dc=example,dc=com",
    common.LDAPUID: "uid",
    common.LDAPFilter: "",
    common.LDAPScope: 2,
    common.LDAPTimeout: 30,
    // config.TokenServiceURL: "",
    // config.RegistryURL: "",
    // config.EmailHost: "",
    // config.EmailPort: 25,
    // config.EmailUsername: "",
    // config.EmailPassword: "password",
    // config.EmailFrom: "from",
    // config.EmailSSL: true,
    // config.EmailIdentity: "",
    // config.ProjectCreationRestriction: config.ProCrtRestrAdmOnly,
    // config.VerifyRemoteCert: false,
    // config.MaxJobWorkers: 3,
    // config.TokenExpiration: 30,
    common.CfgExpiration: 5,
    // config.JobLogDir: "/var/log/jobs",
    common.LDAPURL: "ldap://127.0.0.1",
    common.LDAPSearchDN: "cn=admin,dc=example,dc=com",
    common.LDAPSearchPwd: "admin",
    common.LDAPBaseDN: "dc=example,dc=com",
    common.LDAPUID: "uid",
    common.LDAPFilter: "",
    common.LDAPScope: 2,
    common.LDAPTimeout: 30,
    common.CfgExpiration: 5,
    common.AdminInitialPassword: "password",
    common.LDAPGroupSearchFilter: "objectclass=groupOfNames",
    common.LDAPGroupBaseDN: "dc=example,dc=com",

@@ -75,7 +61,7 @@ var adminServerLdapTestConfig = map[string]interface{}{

func TestMain(m *testing.M) {
    test.InitDatabaseFromEnv()
    coreConfig.InitWithSettings(adminServerLdapTestConfig)
    coreConfig.InitWithSettings(ldapTestConfig)

    secretKeyPath := "/tmp/secretkey"
    _, err := test.GenerateKey(secretKeyPath)

@@ -93,7 +79,8 @@ func TestMain(m *testing.M) {
    initSqls := []string{
        "insert into harbor_user (username, email, password, realname) values ('member_test_01', 'member_test_01@example.com', '123456', 'member_test_01')",
        "insert into project (name, owner_id) values ('member_test_01', 1)",
        "insert into user_group (group_name, group_type, group_property) values ('test_group_01', 1, 'CN=harbor_users,OU=sample,OU=vmware,DC=harbor,DC=com')",
        "insert into project (name, owner_id) values ('member_test_02', 1)",
        "insert into user_group (group_name, group_type, ldap_group_dn) values ('test_group_01', 1, 'CN=harbor_users,OU=sample,OU=vmware,DC=harbor,DC=com')",
        "update project set owner_id = (select user_id from harbor_user where username = 'member_test_01') where name = 'member_test_01'",
        "insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select user_id from harbor_user where username = 'member_test_01'), 'u', 1)",
        "insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select id from user_group where group_name = 'test_group_01'), 'g', 1)",

@@ -101,6 +88,7 @@ func TestMain(m *testing.M) {

    clearSqls := []string{
        "delete from project where name='member_test_01'",
        "delete from project where name='member_test_02'",
        "delete from harbor_user where username='member_test_01' or username='pm_sample'",
        "delete from user_group",
        "delete from project_member",
@@ -389,6 +377,25 @@ func TestAddProjectMemberWithLdapUser(t *testing.T) {
    if pmid == 0 {
        t.Errorf("Error occurred in AddOrUpdateProjectMember: pmid:%v", pmid)
    }

    currentProject, err = dao.GetProjectByName("member_test_02")
    if err != nil {
        t.Errorf("Error occurred when GetProjectByName: %v", err)
    }
    member2 := models.MemberReq{
        ProjectID: currentProject.ProjectID,
        MemberUser: models.User{
            Username: "mike",
        },
        Role: models.PROJECTADMIN,
    }
    pmid, err = api.AddProjectMember(currentProject.ProjectID, member2)
    if err != nil {
        t.Errorf("Error occurred in AddOrUpdateProjectMember: %v", err)
    }
    if pmid == 0 {
        t.Errorf("Error occurred in AddOrUpdateProjectMember: pmid:%v", pmid)
    }
}
func TestAddProjectMemberWithLdapGroup(t *testing.T) {
    currentProject, err := dao.GetProjectByName("member_test_01")

@@ -212,11 +212,12 @@ func LDAPGroupConf() (*models.LdapGroupConf, error) {
        return nil, err
    }
    return &models.LdapGroupConf{
        LdapGroupBaseDN: cfgMgr.Get(common.LDAPGroupBaseDN).GetString(),
        LdapGroupFilter: cfgMgr.Get(common.LDAPGroupSearchFilter).GetString(),
        LdapGroupNameAttribute: cfgMgr.Get(common.LDAPGroupAttributeName).GetString(),
        LdapGroupSearchScope: cfgMgr.Get(common.LDAPGroupSearchScope).GetInt(),
        LdapGroupAdminDN: cfgMgr.Get(common.LdapGroupAdminDn).GetString(),
        LdapGroupBaseDN: cfgMgr.Get(common.LDAPGroupBaseDN).GetString(),
        LdapGroupFilter: cfgMgr.Get(common.LDAPGroupSearchFilter).GetString(),
        LdapGroupNameAttribute: cfgMgr.Get(common.LDAPGroupAttributeName).GetString(),
        LdapGroupSearchScope: cfgMgr.Get(common.LDAPGroupSearchScope).GetInt(),
        LdapGroupAdminDN: cfgMgr.Get(common.LdapGroupAdminDn).GetString(),
        LdapGroupMembershipAttribute: cfgMgr.Get(common.LDAPGroupMembershipAttribute).GetString(),
    }, nil
}

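The new `LdapGroupMembershipAttribute` setting lets group lookups use a deployment-specific membership attribute (for example `memberof` in Active Directory) rather than a fixed one. As a rough illustration only, not Harbor's implementation, such an attribute could be combined with the configured group filter when building an LDAP search filter:

```go
package main

import "fmt"

// buildGroupMemberFilter combines the configured group search filter with the
// membership attribute, e.g. "(&(objectclass=groupOfNames)(member=<userDN>))".
func buildGroupMemberFilter(groupFilter, membershipAttr, userDN string) string {
	if membershipAttr == "" {
		membershipAttr = "member" // assumed default; the real default lives in Harbor's config
	}
	return fmt.Sprintf("(&(%s)(%s=%s))", groupFilter, membershipAttr, userDN)
}

func main() {
	f := buildGroupMemberFilter("objectclass=groupOfNames", "member", "uid=jsmith,dc=example,dc=com")
	fmt.Println(f) // (&(objectclass=groupOfNames)(member=uid=jsmith,dc=example,dc=com))
}
```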
@@ -225,6 +226,11 @@ func TokenExpiration() (int, error) {
    return cfgMgr.Get(common.TokenExpiration).GetInt(), nil
}

// RobotTokenDuration returns the token expiration time of robot accounts (in minutes)
func RobotTokenDuration() int {
    return cfgMgr.Get(common.RobotTokenDuration).GetInt()
}

// ExtEndpoint returns the external URL of Harbor: protocol://host:port
func ExtEndpoint() (string, error) {
    return cfgMgr.Get(common.ExtEndpoint).GetString(), nil

@@ -457,3 +463,17 @@ func GetClairHealthCheckServerURL() string {
    }
    return url
}

// HTTPAuthProxySetting returns the settings of the HTTP auth proxy. The settings are only meaningful when the auth_mode
// is set to http_auth
func HTTPAuthProxySetting() (*models.HTTPAuthProxy, error) {
    if err := cfgMgr.Load(); err != nil {
        return nil, err
    }
    return &models.HTTPAuthProxy{
        Endpoint:       cfgMgr.Get(common.HTTPAuthProxyEndpoint).GetString(),
        SkipCertVerify: cfgMgr.Get(common.HTTPAuthProxySkipCertVerify).GetBool(),
        AlwaysOnBoard:  cfgMgr.Get(common.HTTPAuthProxyAlwaysOnboard).GetBool(),
    }, nil
}

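`HTTPAuthProxySetting` reloads the config manager and exposes the three proxy options that the auth proxy's `ensure()` consumes. A hedged sketch of how a caller might turn `SkipCertVerify` into an `http.Client`, with the struct redeclared locally so the snippet stands alone (in Harbor the type lives in `src/common/models`):

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
)

// HTTPAuthProxy mirrors the fields returned by HTTPAuthProxySetting.
type HTTPAuthProxy struct {
	Endpoint       string
	SkipCertVerify bool
	AlwaysOnBoard  bool
}

// newProxyClient builds an HTTP client for the auth proxy endpoint, optionally
// skipping TLS certificate verification (useful for self-signed test setups).
func newProxyClient(s HTTPAuthProxy) *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: s.SkipCertVerify},
		},
	}
}

func main() {
	c := newProxyClient(HTTPAuthProxy{Endpoint: "https://auth.proxy/suffix", SkipCertVerify: true})
	fmt.Printf("client ready for %T\n", c.Transport)
}
```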
@@ -97,6 +97,9 @@ func TestConfig(t *testing.T) {
        t.Fatalf("failed to get token expiration: %v", err)
    }

    tkExp := RobotTokenDuration()
    assert.Equal(tkExp, 43200)

    if _, err := ExtEndpoint(); err != nil {
        t.Fatalf("failed to get domain name: %v", err)
    }

@@ -222,3 +225,19 @@ func TestConfigureValue_GetMap(t *testing.T) {
    }
    fmt.Printf("%+v\n", policy)
}

func TestHTTPAuthProxySetting(t *testing.T) {
    m := map[string]interface{}{
        common.HTTPAuthProxyAlwaysOnboard:  "true",
        common.HTTPAuthProxySkipCertVerify: "true",
        common.HTTPAuthProxyEndpoint:       "https://auth.proxy/suffix",
    }
    InitWithSettings(m)
    v, e := HTTPAuthProxySetting()
    assert.Nil(t, e)
    assert.Equal(t, *v, models.HTTPAuthProxy{
        Endpoint:       "https://auth.proxy/suffix",
        AlwaysOnBoard:  true,
        SkipCertVerify: true,
    })
}

@@ -188,7 +188,7 @@ func TestGet(t *testing.T) {
        Name: name,
    })
    require.Nil(t, err)
    defer delete(t, id)
    defer deleteProject(t, id)

    // get by invalid input type
    _, err = d.Get([]string{})

@@ -230,7 +230,7 @@ func TestCreate(t *testing.T) {
        },
    })
    require.Nil(t, err)
    defer delete(t, id)
    defer deleteProject(t, id)

    project, err := d.Get(id)
    assert.Nil(t, err)

@@ -288,7 +288,7 @@ func TestList(t *testing.T) {
        Name: name1,
    })
    require.Nil(t, err)
    defer delete(t, id1)
    defer deleteProject(t, id1)

    name2 := "project_for_test_get_all_02"
    id2, err := d.Create(&models.Project{

@@ -298,7 +298,7 @@ func TestList(t *testing.T) {
        },
    })
    require.Nil(t, err)
    defer delete(t, id2)
    defer deleteProject(t, id2)

    // no filter
    result, err := d.List(nil)

@@ -346,7 +346,7 @@ func TestList(t *testing.T) {
    assert.True(t, found2)
}

func delete(t *testing.T, id int64) {
func deleteProject(t *testing.T, id int64) {
    d := NewDriver(client, endpoint, tokenReader)
    if err := d.Delete(id); err != nil {
        t.Logf("failed to delete project %d: %v", id, err)
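This rename (like the later `new` to `newNotification` and `close` to `closeChan` changes in this diff) avoids shadowing Go's predeclared identifiers. Shadowing compiles, but it silently hides the builtin within that scope, as this small illustrative snippet (not Harbor code) shows:

```go
package main

import "fmt"

// delete shadows the builtin delete for this whole package, so map entries can
// no longer be removed with delete(m, k) anywhere in this file.
func delete(msg string) {
	fmt.Println("pretending to delete:", msg)
}

func main() {
	m := map[string]int{"a": 1}
	// delete(m, "a") // would not compile here: it resolves to our delete, which takes one string
	delete("a") // calls the package-level function instead of the builtin
	fmt.Println(m)
}
```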
@@ -45,6 +45,7 @@ func initRouters() {

    beego.Router("/api/users/:id", &api.UserAPI{}, "get:Get;delete:Delete;put:Put")
    beego.Router("/api/users", &api.UserAPI{}, "get:List;post:Post")
    beego.Router("/api/users/search", &api.UserAPI{}, "get:Search")
    beego.Router("/api/users/:id([0-9]+)/password", &api.UserAPI{}, "put:ChangePassword")
    beego.Router("/api/users/:id/permissions", &api.UserAPI{}, "get:ListUserPermissions")
    beego.Router("/api/users/:id/sysadmin", &api.UserAPI{}, "put:ToggleUserAdminRole")

@@ -64,8 +64,8 @@ func (h *Handler) Handle() {
            ns[vuln.NamespaceName] = true
        }
    }
    if new := notification.New; new != nil {
        if vuln := new.Vulnerability; vuln != nil {
    if newNotification := notification.New; newNotification != nil {
        if vuln := newNotification.Vulnerability; vuln != nil {
            log.Debugf("new vulnerability namespace: %s", vuln.NamespaceName)
            ns[vuln.NamespaceName] = true
        }
@@ -382,7 +382,7 @@ The following configuration options are supported:
| worker_pool.redis_pool.namespace | The namespace used in redis| JOB_SERVICE_POOL_REDIS_NAMESPACE |
| loggers | Loggers for job service itself. Refer to [Configure loggers](#configure-loggers)| |
| job_loggers | Loggers for the running jobs. Refer to [Configure loggers](#configure-loggers) | |
| admin_server | The Harbor admin server endpoint, which is used to retrieve Harbor configurations | ADMINSERVER_URL |
| core_server | The Harbor core server endpoint, which is used to retrieve Harbor configurations | CORE_URL |

### Sample

@@ -428,9 +428,6 @@ job_loggers:
  loggers:
    - name: "STD_OUTPUT" # Same with above
      level: "DEBUG"

#Admin server endpoint
admin_server: "http://adminserver:9010/"
```

## API

@@ -65,8 +65,6 @@ type Configuration struct {
    // Server listening port
    Port uint `yaml:"port"`

    AdminServer string `yaml:"admin_server"`

    // Additional config when using https
    HTTPSConfig *HTTPSConfig `yaml:"https_config,omitempty"`

@@ -171,11 +169,6 @@ func GetUIAuthSecret() string {
    return utils.ReadEnv(uiAuthSecret)
}

// GetAdminServerEndpoint return the admin server endpoint
func GetAdminServerEndpoint() string {
    return DefaultConfig.AdminServer
}

// Load env variables
func (c *Configuration) loadEnvs() {
    prot := utils.ReadEnv(jobServiceProtocol)

@@ -251,11 +244,6 @@ func (c *Configuration) loadEnvs() {
        }
    }

    // admin server
    if coreServer := utils.ReadEnv(jobServiceCoreServerEndpoint); !utils.IsEmptyStr(coreServer) {
        c.AdminServer = coreServer
    }

}

// Check if the configurations are valid settings.
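With the `AdminServer` field and its env-var override removed, `loadEnvs` keeps the same pattern for the remaining settings: read a named environment variable and, when it is non-empty, let it override the value loaded from the YAML file. A minimal sketch of that override pattern (field and variable names here are illustrative, not the job service's actual keys):

```go
package main

import (
	"fmt"
	"os"
)

type config struct {
	Protocol string
	Port     string
}

// overrideFromEnv applies an environment variable on top of a file-loaded value when set.
func overrideFromEnv(current *string, envKey string) {
	if v := os.Getenv(envKey); v != "" {
		*current = v
	}
}

func main() {
	c := config{Protocol: "http", Port: "8080"} // values loaded from YAML in the real service
	overrideFromEnv(&c.Protocol, "JOB_SERVICE_PROTOCOL")
	overrideFromEnv(&c.Port, "JOB_SERVICE_PORT")
	fmt.Printf("%+v\n", c)
}
```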
@@ -325,9 +313,5 @@ func (c *Configuration) validate() error {
        return errors.New("missing logger config of job")
    }

    if _, err := url.Parse(c.AdminServer); err != nil {
        return fmt.Errorf("invalid admin server endpoint: %s", err)
    }

    return nil // valid
}

@@ -69,11 +69,6 @@ func TestDefaultConfig(t *testing.T) {
    if err := DefaultConfig.Load("../config_test.yml", true); err != nil {
        t.Fatalf("Load config from yaml file, expect nil error but got error '%s'\n", err)
    }

    if endpoint := GetAdminServerEndpoint(); endpoint != "http://127.0.0.1:8888" {
        t.Errorf("expect default admin server endpoint 'http://127.0.0.1:8888' but got '%s'\n", endpoint)
    }

    redisURL := DefaultConfig.PoolConfig.RedisPoolCfg.RedisURL
    if redisURL != "redis://localhost:6379" {
        t.Errorf("expect redisURL '%s' but got '%s'\n", "redis://localhost:6379", redisURL)
@@ -272,6 +272,12 @@ func (t *Transfer) transferLayers(tag string, blobs []distribution.Descriptor) error {
    }

    digest := blob.Digest.String()

    if blob.MediaType == schema2.MediaTypeForeignLayer {
        t.logger.Infof("blob %s of %s:%s is a foreign layer, skip", digest, repository, tag)
        continue
    }

    exist, err := t.dstRegistry.BlobExist(digest)
    if err != nil {
        t.logger.Errorf("an error occurred while checking existence of blob %s of %s:%s on destination registry: %v",
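Foreign layers (Windows base layers, for example) are not pushed to a destination registry, so replication now skips them by media type before the blob-existence check. A self-contained sketch of that filter, using the `docker/distribution` schema2 media type constant the diff references:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/schema2"
)

// filterTransferableBlobs drops foreign layers, which must be fetched from
// their external URLs rather than copied to the destination registry.
func filterTransferableBlobs(blobs []distribution.Descriptor) []distribution.Descriptor {
	var out []distribution.Descriptor
	for _, b := range blobs {
		if b.MediaType == schema2.MediaTypeForeignLayer {
			continue // skip: foreign layers are referenced by URL, not copied
		}
		out = append(out, b)
	}
	return out
}

func main() {
	blobs := []distribution.Descriptor{
		{MediaType: schema2.MediaTypeLayer},
		{MediaType: schema2.MediaTypeForeignLayer},
	}
	fmt.Println(len(filterTransferableBlobs(blobs))) // 1
}
```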
@@ -124,7 +124,7 @@ func (bs *Bootstrap) LoadAndRun(ctx context.Context, cancel context.CancelFunc)
    apiServer.Stop()

    // In case stop is called before the server is ready
    close := make(chan bool, 1)
    closeChan := make(chan bool, 1)
    go func() {
        timer := time.NewTimer(10 * time.Second)
        defer timer.Stop()

@@ -133,14 +133,14 @@ func (bs *Bootstrap) LoadAndRun(ctx context.Context, cancel context.CancelFunc)
        case <-timer.C:
            // Try again
            apiServer.Stop()
        case <-close:
        case <-closeChan:
            return
        }

    }()

    rootContext.WG.Wait()
    close <- true
    closeChan <- true

    if err != nil {
        logger.Fatalf("Server exit with error: %s\n", err)
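Beyond un-shadowing the `close` builtin, this hunk shows the shutdown pattern the bootstrap uses: a goroutine retries `Stop()` after a timeout unless a done channel signals that the main wait has already finished. A stripped-down sketch of the same pattern, with `stop` standing in for `apiServer.Stop` (purely illustrative):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var wg sync.WaitGroup
	stop := func() { fmt.Println("stop requested") }

	stop()

	// Retry the stop after a timeout in case it was called before the server
	// was ready; give up as soon as the main wait completes.
	done := make(chan bool, 1)
	go func() {
		timer := time.NewTimer(100 * time.Millisecond) // 10s in the real code
		defer timer.Stop()
		select {
		case <-timer.C:
			stop() // try again
		case <-done:
			return
		}
	}()

	wg.Wait()    // all workers finished (none started in this sketch)
	done <- true // tell the retry goroutine it is no longer needed
	time.Sleep(50 * time.Millisecond)
	fmt.Println("shutdown complete")
}
```

The buffered channel keeps the final send from blocking even if the retry goroutine has already returned.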
@@ -82,10 +82,14 @@ export class Configuration {
    email_password?: StringValueItem;
    email_insecure: BoolValueItem;
    verify_remote_cert: BoolValueItem;
    robot_token_duration: NumberValueItem;
    token_expiration: NumberValueItem;
    cfg_expiration: NumberValueItem;
    scan_all_policy: ComplexValueItem;
    read_only: BoolValueItem;
    http_authproxy_endpoint?: StringValueItem;
    http_authproxy_skip_cert_verify?: BoolValueItem;
    http_authproxy_always_onboard?: BoolValueItem;

    public constructor() {
        this.auth_mode = new StringValueItem("db_auth", true);

@@ -117,6 +121,7 @@ export class Configuration {
        this.email_password = new StringValueItem("", true);
        this.email_insecure = new BoolValueItem(false, true);
        this.token_expiration = new NumberValueItem(30, true);
        this.robot_token_duration = new NumberValueItem(30, true);
        this.cfg_expiration = new NumberValueItem(30, true);
        this.verify_remote_cert = new BoolValueItem(false, true);
        this.scan_all_policy = new ComplexValueItem({

@@ -126,5 +131,8 @@ export class Configuration {
            }
        }, true);
        this.read_only = new BoolValueItem(false, true);
        this.http_authproxy_endpoint = new StringValueItem("", true);
        this.http_authproxy_skip_cert_verify = new BoolValueItem(false, true);
        this.http_authproxy_always_onboard = new BoolValueItem(false, true);
    }
}