Merge remote-tracking branch 'upstream/replication_enhancement' into repEnhance

This commit is contained in:
Fuhui Peng committed 2018-01-03 13:41:31 +08:00
commit 31c3063783
2380 changed files with 588320 additions and 1881 deletions

View File

@ -48,13 +48,9 @@ pipeline:
commands:
- du -ks harbor-offline-installer-*.tgz | awk '{print $1 / 1024}' | { read x; echo $x MB; }
- mkdir -p bundle
- mkdir -p pks-bundle
- echo $(git describe --tags) > pks-bundle/version
- cp harbor-offline-installer-*.tgz bundle
- if [ ${DRONE_BRANCH} = "master" ]; then cp harbor-offline-installer-*.tgz pks-bundle/harbor-offline-installer-latest-master.tgz; fi
- if ( echo ${DRONE_BRANCH} | grep "pks*" ); then cp harbor-offline-installer-*.tgz pks-bundle/harbor-offline-installer-latest-pks.tgz; fi
- cp harbor-offline-installer-*.tgz bundle/harbor-offline-installer-latest.tgz
- ls -la bundle
- ls -la pks-bundle
when:
repo: vmware/harbor
event: [ push, tag ]
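The branch tests in the bundle step above decide which tarballs get copied; note that the pattern given to grep is a regular expression, not a shell glob. A minimal shell sketch (branch names are hypothetical) showing what `grep "pks*"` actually matches:

```sh
# "pks*" as a regex means "pk" followed by zero or more "s", so any branch name
# that merely contains "pk" matches; anchoring with "^pks" would be stricter.
for b in master pks-1.0 feature-pkcs11 release-1.3.0; do
  if echo "$b" | grep -q "pks*"; then echo "$b: matches"; else echo "$b: no match"; fi
done
```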
@ -83,7 +79,7 @@ pipeline:
when:
repo: vmware/harbor
event: [ push, tag ]
branch: [ master, release-* ]
branch: [ master ]
status: success
publish-gcs-releases:
@ -100,20 +96,6 @@ pipeline:
branch: [ release-*, refs/tags/* ]
status: success
publish-gcs-pks-builds:
image: maplain/drone-gcs:latest
pull: true
source: pks-bundle
target: harbor-ci-pipeline-store/latest
acl:
- allUsers:READER
cache_control: public,max-age=3600
when:
repo: vmware/harbor
event: [ push, tag ]
branch: [ master, pks-*, refs/tags/* ]
status: success
trigger:
image: plugins/downstream
server: https://ci.vcna.io

View File

@ -1 +1 @@
eyJhbGciOiJIUzI1NiJ9.IyBIYXJib3IgZHJvbmUuCi0tLQp3b3Jrc3BhY2U6CiAgYmFzZTogL2Ryb25lCiAgcGF0aDogc3JjL2dpdGh1Yi5jb20vdm13YXJlL2hhcmJvcgoKcGlwZWxpbmU6CiAgY2xvbmU6CiAgICBpbWFnZTogcGx1Z2lucy9naXQKICAgIHRhZ3M6IHRydWUKICAgIHJlY3Vyc2l2ZTogZmFsc2UKCiAgaW50ZWdyYXRpb24tdGVzdC1vbi1wcjoKICAgIGltYWdlOiB2bXdhcmUvaGFyYm9yLWUyZS1lbmdpbmU6MS4zOAogICAgcHVsbDogdHJ1ZQogICAgcHJpdmlsZWdlZDogdHJ1ZQogICAgZW52aXJvbm1lbnQ6CiAgICAgIEJJTjogYmluCiAgICAgIEdPUEFUSDogL2Ryb25lCiAgICAgIFNIRUxMOiAvYmluL2Jhc2gKICAgICAgTE9HX1RFTVBfRElSOiBpbnN0YWxsLWxvZ3MKICAgICAgR0lUSFVCX0FVVE9NQVRJT05fQVBJX0tFWTogICR7R0lUSFVCX0FVVE9NQVRJT05fQVBJX0tFWX0KICAgICAgRFJPTkVfU0VSVkVSOiAgJHtEUk9ORV9TRVJWRVJ9CiAgICAgIERST05FX1RPS0VOOiAgJHtEUk9ORV9UT0tFTl9JTlRFfQogICAgICBIQVJCT1JfQURNSU46ICR7SEFSQk9SX0FETUlOfQogICAgICBIQVJCT1JfUEFTU1dPUkQ6ICR7SEFSQk9SX1BBU1NXT1JEfQogICAgICBHU19QUk9KRUNUX0lEOiAke0dTX1BST0pFQ1RfSUR9CiAgICAgIEdTX0NMSUVOVF9FTUFJTDogJHtHU19DTElFTlRfRU1BSUx9CiAgICAgIEdTX1BSSVZBVEVfS0VZOiAke0dTX1BSSVZBVEVfS0VZfQogICAgICBET01BSU46ICR7Q0lfRE9NQUlOfQogICAgICBNQUlMX1BXRDogJHtNQUlMX1BXRH0KICAgICAgTlBNX1VTRVJOQU1FOiAke05QTV9VU0VSTkFNRX0KICAgICAgTlBNX1BBU1NXT1JEOiAke05QTV9QQVNTV09SRH0KICAgIGNvbW1hbmRzOgogICAgICAtIHRlc3RzL2ludGVncmF0aW9uLnNoCiAgICB3aGVuOgogICAgICBzdGF0dXM6IHN1Y2Nlc3MKCiAgYnVuZGxlOgogICAgaW1hZ2U6IHZtd2FyZS9oYXJib3ItZTJlLWVuZ2luZToxLjM4CiAgICBwdWxsOiB0cnVlCiAgICBwcml2aWxlZ2VkOiB0cnVlCiAgICBlbnZpcm9ubWVudDoKICAgICAgQklOOiBiaW4KICAgICAgR09QQVRIOiAvZHJvbmUKICAgICAgU0hFTEw6IC9iaW4vYmFzaAogICAgICBCVUlMRF9OVU1CRVI6ICR7RFJPTkVfQlVJTERfTlVNQkVSfQogICAgY29tbWFuZHM6CiAgICAgIC0gZHUgLWtzIGhhcmJvci1vZmZsaW5lLWluc3RhbGxlci0qLnRneiB8IGF3ayAne3ByaW50ICQxIC8gMTAyNH0nIHwgeyByZWFkIHg7IGVjaG8gJHggTUI7IH0KICAgICAgLSBta2RpciAtcCBidW5kbGUKICAgICAgLSBta2RpciAtcCBwa3MtYnVuZGxlCiAgICAgIC0gZWNobyAkKGdpdCBkZXNjcmliZSAtLXRhZ3MpID4gcGtzLWJ1bmRsZS92ZXJzaW9uIAogICAgICAtIGNwIGhhcmJvci1vZmZsaW5lLWluc3RhbGxlci0qLnRneiBidW5kbGUKICAgICAgLSBpZiBbICR7RFJPTkVfQlJBTkNIfSA9ICJtYXN0ZXIiIF07IHRoZW4gY3AgaGFyYm9yLW9mZmxpbmUtaW5zdGFsbGVyLSoudGd6IHBrcy1idW5kbGUvaGFyYm9yLW9mZmxpbmUtaW5zdGFsbGVyLWxhdGVzdC1tYXN0ZXIudGd6OyBmaQogICAgICAtIGlmICggZWNobyAke0RST05FX0JSQU5DSH0gfCBncmVwICJwa3MqIiApOyB0aGVuIGNwIGhhcmJvci1vZmZsaW5lLWluc3RhbGxlci0qLnRneiBwa3MtYnVuZGxlL2hhcmJvci1vZmZsaW5lLWluc3RhbGxlci1sYXRlc3QtcGtzLnRnejsgZmkKICAgICAgLSBscyAtbGEgYnVuZGxlCiAgICAgIC0gbHMgLWxhIHBrcy1idW5kbGUKICAgIHdoZW46CiAgICAgIHJlcG86IHZtd2FyZS9oYXJib3IKICAgICAgZXZlbnQ6IFsgcHVzaCwgdGFnIF0KICAgICAgYnJhbmNoOiBbIG1hc3RlciwgcmVsZWFzZS0qLCBwa3MtKiwgcmVmcy90YWdzLyogXQogICAgICBzdGF0dXM6IHN1Y2Nlc3MKCiAgbm90aWZ5LXNsYWNrOgogICAgaW1hZ2U6IHBsdWdpbnMvc2xhY2sKICAgIHdlYmhvb2s6ICR7U0xBQ0tfVVJMfQogICAgdXNlcm5hbWU6IGRyb25lCiAgICB0ZW1wbGF0ZTogPgogICAgICBidWlsZCBodHRwczovL2NpLnZjbmEuaW8vdm13YXJlL2hhcmJvci97eyBidWlsZC5udW1iZXIgfX0gZmluaXNoZWQgd2l0aCBhIHt7IGJ1aWxkLnN0YXR1cyB9fSBzdGF0dXMuIFBsZWFzZSBmaW5kIGxvZ3MgYXQgaHR0cHM6Ly9zdG9yYWdlLmdvb2dsZWFwaXMuY29tL2hhcmJvci1jaS1sb2dzL2ludGVncmF0aW9uX2xvZ3Nfe3sgYnVpbGQubnVtYmVyIH19X3t7IGJ1aWxkLmNvbW1pdCB9fS50YXIuZ3oKICAgIHdoZW46CiAgICAgIHJlcG86IHZtd2FyZS9oYXJib3IKICAgICAgYnJhbmNoOiBbIG1hc3RlciwgcmVsZWFzZS0qLCByZWZzL3RhZ3MvKiBdCiAgICAgIHN0YXR1czogWyBmYWlsdXJlLCBzdWNjZXNzIF0KCiAgcHVibGlzaC1nY3MtYnVpbGRzOgogICAgaW1hZ2U6IG1hcGxhaW4vZHJvbmUtZ2NzOmxhdGVzdAogICAgcHVsbDogdHJ1ZQogICAgc291cmNlOiBidW5kbGUKICAgIHRhcmdldDogaGFyYm9yLWJ1aWxkcwogICAgYWNsOgogICAgICAtIGFsbFVzZXJzOlJFQURFUgogICAgY2FjaGVfY29udHJvbDogcHVibGljLG1heC1hZ2U9MzYwMAogICAgd2hlbjoKICAgICAgcmVwbzogdm13YXJlL2hhcmJvcgogICAgICBldmVudDogWyBwdXNoLCB0YWcgXQogICAgICBicmFuY2g6IFsgbWFzdGVyLCByZWxlYXNlLSogXQogICAgICBzdGF0dXM6IHN1Y2Nlc3MKCiAgcHVibGlzaC1nY3
MtcmVsZWFzZXM6CiAgICBpbWFnZTogbWFwbGFpbi9kcm9uZS1nY3M6bGF0ZXN0CiAgICBwdWxsOiB0cnVlCiAgICBzb3VyY2U6IGJ1bmRsZQogICAgdGFyZ2V0OiBoYXJib3ItcmVsZWFzZXMKICAgIGFjbDoKICAgICAgLSBhbGxVc2VyczpSRUFERVIKICAgIGNhY2hlX2NvbnRyb2w6IHB1YmxpYyxtYXgtYWdlPTM2MDAKICAgIHdoZW46CiAgICAgIHJlcG86IHZtd2FyZS9oYXJib3IKICAgICAgZXZlbnQ6IFsgcHVzaCwgdGFnIF0KICAgICAgYnJhbmNoOiBbIHJlbGVhc2UtKiwgcmVmcy90YWdzLyogXQogICAgICBzdGF0dXM6IHN1Y2Nlc3MKCiAgcHVibGlzaC1nY3MtcGtzLWJ1aWxkczoKICAgIGltYWdlOiBtYXBsYWluL2Ryb25lLWdjczpsYXRlc3QKICAgIHB1bGw6IHRydWUKICAgIHNvdXJjZTogcGtzLWJ1bmRsZQogICAgdGFyZ2V0OiBoYXJib3ItY2ktcGlwZWxpbmUtc3RvcmUvbGF0ZXN0CiAgICBhY2w6CiAgICAgIC0gYWxsVXNlcnM6UkVBREVSCiAgICBjYWNoZV9jb250cm9sOiBwdWJsaWMsbWF4LWFnZT0zNjAwCiAgICB3aGVuOgogICAgICByZXBvOiB2bXdhcmUvaGFyYm9yCiAgICAgIGV2ZW50OiBbIHB1c2gsIHRhZyBdCiAgICAgIGJyYW5jaDogWyBtYXN0ZXIsIHBrcy0qLCByZWZzL3RhZ3MvKiBdCiAgICAgIHN0YXR1czogc3VjY2VzcwoKICB0cmlnZ2VyOgogICAgaW1hZ2U6IHBsdWdpbnMvZG93bnN0cmVhbQogICAgc2VydmVyOiBodHRwczovL2NpLnZjbmEuaW8KICAgIHRva2VuOiAke0RPV05TVFJFQU1fVE9LRU59CiAgICBmb3JrOiB0cnVlCiAgICByZXBvc2l0b3JpZXM6CiAgICAgICAtIHZtd2FyZS92aWMtcHJvZHVjdAogICAgd2hlbjoKICAgICAgcmVwbzogdm13YXJlL2hhcmJvcgogICAgICBldmVudDogWyBwdXNoLCB0YWcgXQogICAgICBicmFuY2g6IFsgbWFzdGVyLCByZWxlYXNlLSosIHJlZnMvdGFncy8qIF0KICAgICAgc3RhdHVzOiBzdWNjZXNzCg.TRzg0jvokGI8PBccqkW4foVBX_1uGzFUhTRaPzMFaeY
eyJhbGciOiJIUzI1NiJ9.IyBIYXJib3IgZHJvbmUuCi0tLQp3b3Jrc3BhY2U6CiAgYmFzZTogL2Ryb25lCiAgcGF0aDogc3JjL2dpdGh1Yi5jb20vdm13YXJlL2hhcmJvcgoKcGlwZWxpbmU6CiAgY2xvbmU6CiAgICBpbWFnZTogcGx1Z2lucy9naXQKICAgIHRhZ3M6IHRydWUKICAgIHJlY3Vyc2l2ZTogZmFsc2UKCiAgaW50ZWdyYXRpb24tdGVzdC1vbi1wcjoKICAgIGltYWdlOiB2bXdhcmUvaGFyYm9yLWUyZS1lbmdpbmU6MS4zOAogICAgcHVsbDogdHJ1ZQogICAgcHJpdmlsZWdlZDogdHJ1ZQogICAgZW52aXJvbm1lbnQ6CiAgICAgIEJJTjogYmluCiAgICAgIEdPUEFUSDogL2Ryb25lCiAgICAgIFNIRUxMOiAvYmluL2Jhc2gKICAgICAgTE9HX1RFTVBfRElSOiBpbnN0YWxsLWxvZ3MKICAgICAgR0lUSFVCX0FVVE9NQVRJT05fQVBJX0tFWTogICR7R0lUSFVCX0FVVE9NQVRJT05fQVBJX0tFWX0KICAgICAgRFJPTkVfU0VSVkVSOiAgJHtEUk9ORV9TRVJWRVJ9CiAgICAgIERST05FX1RPS0VOOiAgJHtEUk9ORV9UT0tFTl9JTlRFfQogICAgICBIQVJCT1JfQURNSU46ICR7SEFSQk9SX0FETUlOfQogICAgICBIQVJCT1JfUEFTU1dPUkQ6ICR7SEFSQk9SX1BBU1NXT1JEfQogICAgICBHU19QUk9KRUNUX0lEOiAke0dTX1BST0pFQ1RfSUR9CiAgICAgIEdTX0NMSUVOVF9FTUFJTDogJHtHU19DTElFTlRfRU1BSUx9CiAgICAgIEdTX1BSSVZBVEVfS0VZOiAke0dTX1BSSVZBVEVfS0VZfQogICAgICBET01BSU46ICR7Q0lfRE9NQUlOfQogICAgICBNQUlMX1BXRDogJHtNQUlMX1BXRH0KICAgICAgTlBNX1VTRVJOQU1FOiAke05QTV9VU0VSTkFNRX0KICAgICAgTlBNX1BBU1NXT1JEOiAke05QTV9QQVNTV09SRH0KICAgIGNvbW1hbmRzOgogICAgICAtIHRlc3RzL2ludGVncmF0aW9uLnNoCiAgICB3aGVuOgogICAgICBzdGF0dXM6IHN1Y2Nlc3MKCiAgYnVuZGxlOgogICAgaW1hZ2U6IHZtd2FyZS9oYXJib3ItZTJlLWVuZ2luZToxLjM4CiAgICBwdWxsOiB0cnVlCiAgICBwcml2aWxlZ2VkOiB0cnVlCiAgICBlbnZpcm9ubWVudDoKICAgICAgQklOOiBiaW4KICAgICAgR09QQVRIOiAvZHJvbmUKICAgICAgU0hFTEw6IC9iaW4vYmFzaAogICAgICBCVUlMRF9OVU1CRVI6ICR7RFJPTkVfQlVJTERfTlVNQkVSfQogICAgY29tbWFuZHM6CiAgICAgIC0gZHUgLWtzIGhhcmJvci1vZmZsaW5lLWluc3RhbGxlci0qLnRneiB8IGF3ayAne3ByaW50ICQxIC8gMTAyNH0nIHwgeyByZWFkIHg7IGVjaG8gJHggTUI7IH0KICAgICAgLSBta2RpciAtcCBidW5kbGUKICAgICAgLSBjcCBoYXJib3Itb2ZmbGluZS1pbnN0YWxsZXItKi50Z3ogYnVuZGxlCiAgICAgIC0gY3AgaGFyYm9yLW9mZmxpbmUtaW5zdGFsbGVyLSoudGd6IGJ1bmRsZS9oYXJib3Itb2ZmbGluZS1pbnN0YWxsZXItbGF0ZXN0LnRnegogICAgICAtIGxzIC1sYSBidW5kbGUKICAgIHdoZW46CiAgICAgIHJlcG86IHZtd2FyZS9oYXJib3IKICAgICAgZXZlbnQ6IFsgcHVzaCwgdGFnIF0KICAgICAgYnJhbmNoOiBbIG1hc3RlciwgcmVsZWFzZS0qLCBwa3MtKiwgcmVmcy90YWdzLyogXQogICAgICBzdGF0dXM6IHN1Y2Nlc3MKCiAgbm90aWZ5LXNsYWNrOgogICAgaW1hZ2U6IHBsdWdpbnMvc2xhY2sKICAgIHdlYmhvb2s6ICR7U0xBQ0tfVVJMfQogICAgdXNlcm5hbWU6IGRyb25lCiAgICB0ZW1wbGF0ZTogPgogICAgICBidWlsZCBodHRwczovL2NpLnZjbmEuaW8vdm13YXJlL2hhcmJvci97eyBidWlsZC5udW1iZXIgfX0gZmluaXNoZWQgd2l0aCBhIHt7IGJ1aWxkLnN0YXR1cyB9fSBzdGF0dXMuIFBsZWFzZSBmaW5kIGxvZ3MgYXQgaHR0cHM6Ly9zdG9yYWdlLmdvb2dsZWFwaXMuY29tL2hhcmJvci1jaS1sb2dzL2ludGVncmF0aW9uX2xvZ3Nfe3sgYnVpbGQubnVtYmVyIH19X3t7IGJ1aWxkLmNvbW1pdCB9fS50YXIuZ3oKICAgIHdoZW46CiAgICAgIHJlcG86IHZtd2FyZS9oYXJib3IKICAgICAgYnJhbmNoOiBbIG1hc3RlciwgcmVsZWFzZS0qLCByZWZzL3RhZ3MvKiBdCiAgICAgIHN0YXR1czogWyBmYWlsdXJlLCBzdWNjZXNzIF0KCiAgcHVibGlzaC1nY3MtYnVpbGRzOgogICAgaW1hZ2U6IG1hcGxhaW4vZHJvbmUtZ2NzOmxhdGVzdAogICAgcHVsbDogdHJ1ZQogICAgc291cmNlOiBidW5kbGUKICAgIHRhcmdldDogaGFyYm9yLWJ1aWxkcwogICAgYWNsOgogICAgICAtIGFsbFVzZXJzOlJFQURFUgogICAgY2FjaGVfY29udHJvbDogcHVibGljLG1heC1hZ2U9MzYwMAogICAgd2hlbjoKICAgICAgcmVwbzogdm13YXJlL2hhcmJvcgogICAgICBldmVudDogWyBwdXNoLCB0YWcgXQogICAgICBicmFuY2g6IFsgbWFzdGVyIF0KICAgICAgc3RhdHVzOiBzdWNjZXNzCgogIHB1Ymxpc2gtZ2NzLXJlbGVhc2VzOgogICAgaW1hZ2U6IG1hcGxhaW4vZHJvbmUtZ2NzOmxhdGVzdAogICAgcHVsbDogdHJ1ZQogICAgc291cmNlOiBidW5kbGUKICAgIHRhcmdldDogaGFyYm9yLXJlbGVhc2VzCiAgICBhY2w6CiAgICAgIC0gYWxsVXNlcnM6UkVBREVSCiAgICBjYWNoZV9jb250cm9sOiBwdWJsaWMsbWF4LWFnZT0zNjAwCiAgICB3aGVuOgogICAgICByZXBvOiB2bXdhcmUvaGFyYm9yCiAgICAgIGV2ZW50OiBbIHB1c2gsIHRhZyBdCiAgICAgIGJyYW5jaDogWyByZWxlYXNlLSosIHJlZnMvdGFncy8qIF0KICAgICAgc3RhdHVzOiBzdWNjZXNzCgogIHRyaWdnZXI6CiAgIC
BpbWFnZTogcGx1Z2lucy9kb3duc3RyZWFtCiAgICBzZXJ2ZXI6IGh0dHBzOi8vY2kudmNuYS5pbwogICAgdG9rZW46ICR7RE9XTlNUUkVBTV9UT0tFTn0KICAgIGZvcms6IHRydWUKICAgIHJlcG9zaXRvcmllczoKICAgICAgIC0gdm13YXJlL3ZpYy1wcm9kdWN0CiAgICB3aGVuOgogICAgICByZXBvOiB2bXdhcmUvaGFyYm9yCiAgICAgIGV2ZW50OiBbIHB1c2gsIHRhZyBdCiAgICAgIGJyYW5jaDogWyBtYXN0ZXIsIHJlbGVhc2UtKiwgcmVmcy90YWdzLyogXQogICAgICBzdGF0dXM6IHN1Y2Nlc3MK.0TeBeHyYbP8xrqxi1RUDjXnB0tqChcuuDhNv4hbbUJs

View File

@ -79,7 +79,7 @@ script:
- sudo mkdir -p /harbor
- sudo mv ./VERSION /harbor/VERSION
- sudo service mysql stop
- sudo make run_clarity_ut CLARITYIMAGE=vmware/harbor-clarity-ui-builder:1.2.7
- sudo make run_clarity_ut CLARITYIMAGE=vmware/harbor-clarity-ui-builder:1.3.0
- cat ./src/ui_ng/lib/npm-ut-test-results
- sudo ./tests/testprepare.sh
- sudo make -f make/photon/Makefile -e MARIADBVERSION=10.2.10 -e VERSIONTAG=dev
@ -106,7 +106,7 @@ script:
- sudo rm -rf /data/config/*
- sudo rm -rf /data/database/*
- ls /data/cert
- sudo make install GOBUILDIMAGE=golang:1.7.3 COMPILETAG=compile_golangimage CLARITYIMAGE=vmware/harbor-clarity-ui-builder:1.2.7 NOTARYFLAG=true CLAIRFLAG=true
- sudo make install GOBUILDIMAGE=golang:1.9.2 COMPILETAG=compile_golangimage CLARITYIMAGE=vmware/harbor-clarity-ui-builder:1.3.0 NOTARYFLAG=true CLAIRFLAG=true
- sleep 10
- docker ps
- ./tests/validatecontainers.sh

View File

@ -34,6 +34,9 @@ Please submit a PR to contain changes bit by bit. A PR consisting of a lot featu
You can propose new designs for existing Harbor features. You can also design
entirely new features. Please do open an issue on GitHub for discussion first. This is necessary to ensure the overall architecture is consistent and to avoid duplicated work in the roadmap.
### Dependency management
Harbor uses [dep](https://github.com/golang/dep) for dependency management of Go code. The official maintainers take responsibility for managing the code in the `vendor` directory. Please don't submit PRs to update the dependency code; open an issue instead. If your PR requires a change to the vendor code, please discuss it with the maintainers in advance.
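For contributors who need to reproduce a dependency problem locally before opening that issue, a minimal sketch of the dep workflow; the package path is hypothetical, and the commands are run from the `src/` directory where `Gopkg.toml` lives:

```sh
go get -u github.com/golang/dep/cmd/dep      # install the dep tool
cd src/
dep status                                   # list current constraints and locked revisions
dep ensure                                   # sync vendor/ with Gopkg.toml and Gopkg.lock
dep ensure -add github.com/example/somepkg   # add a dependency (normally left to maintainers)
```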
### Conventions
Fork Harbor's repository and make changes on your own fork in a new branch. The branch should be named XXX-description where XXX is the number of the issue. Please run the full test suite on your branch before creating a PR.

View File

@ -210,7 +210,7 @@ DOCKERSAVE_PARA=$(DOCKERIMAGENAME_ADMINSERVER):$(VERSIONTAG) \
$(DOCKERIMAGENAME_LOG):$(VERSIONTAG) \
$(DOCKERIMAGENAME_DB):$(VERSIONTAG) \
$(DOCKERIMAGENAME_JOBSERVICE):$(VERSIONTAG) \
vmware/nginx-photon:$(NGINXVERSION)-$(VERSIONTAG) vmware/registry-photon:$(REGISTRYVERSION)-$(VERSIONTAG) \
vmware/nginx-photon:$(NGINXVERSION) vmware/registry-photon:$(REGISTRYVERSION)-$(VERSIONTAG) \
vmware/photon:$(PHOTONVERSION)
PACKAGE_OFFLINE_PARA=-zcvf harbor-offline-installer-$(GITTAGVERSION).tgz \
$(HARBORPKG)/common/templates $(HARBORPKG)/$(DOCKERIMGFILE).$(VERSIONTAG).tar.gz \
@ -227,13 +227,13 @@ DOCKERCOMPOSE_LIST=-f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSEFILENAME)
ifeq ($(NOTARYFLAG), true)
DOCKERSAVE_PARA+= vmware/notary-server-photon:$(NOTARYVERSION)-$(VERSIONTAG) vmware/notary-signer-photon:$(NOTARYVERSION)-$(VERSIONTAG) \
vmware/mariadb-photon:$(MARIADBVERSION)-$(VERSIONTAG)
vmware/mariadb-photon:$(MARIADBVERSION)
PACKAGE_OFFLINE_PARA+= $(HARBORPKG)/$(DOCKERCOMPOSENOTARYFILENAME)
PACKAGE_ONLINE_PARA+= $(HARBORPKG)/$(DOCKERCOMPOSENOTARYFILENAME)
DOCKERCOMPOSE_LIST+= -f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSENOTARYFILENAME)
endif
ifeq ($(CLAIRFLAG), true)
DOCKERSAVE_PARA+= vmware/clair-photon:$(CLAIRVERSION)-$(VERSIONTAG) vmware/postgresql-photon:$(CLAIRDBVERSION)-$(VERSIONTAG)
DOCKERSAVE_PARA+= vmware/clair-photon:$(CLAIRVERSION)-$(VERSIONTAG) vmware/postgresql-photon:$(CLAIRDBVERSION)
PACKAGE_OFFLINE_PARA+= $(HARBORPKG)/$(DOCKERCOMPOSECLAIRFILENAME)
PACKAGE_ONLINE_PARA+= $(HARBORPKG)/$(DOCKERCOMPOSECLAIRFILENAME)
DOCKERCOMPOSE_LIST+= -f $(DOCKERCOMPOSEFILEPATH)/$(DOCKERCOMPOSECLAIRFILENAME)
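DOCKERSAVE_PARA is the image list handed to a `docker save` invocation elsewhere in the Makefile, which produces the image tarball bundled into the offline installer. A rough sketch of the kind of command it expands to; the tags and output path are illustrative only, and the actual target (including the gzip step implied by the `.tar.gz` name above) lives in a part of the Makefile not shown here:

```sh
docker save -o harbor.dev.tar \
  vmware/harbor-ui:dev vmware/harbor-db:dev vmware/harbor-jobservice:dev \
  vmware/nginx-photon:1.11.13 vmware/registry-photon:2.6.2-dev \
  vmware/clair-photon:2.0.1-dev vmware/postgresql-photon:9.6.5
```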
@ -336,10 +336,10 @@ package_online: modify_composefile
package_offline: compile version build modify_sourcefiles modify_composefile
@echo "packing offline package ..."
@cp -r make $(HARBORPKG)
@cp LICENSE $(HARBORPKG)/LICENSE
@cp NOTICE $(HARBORPKG)/NOTICE
@cp $(HARBORPKG)/common/db/registry.sql $(HARBORPKG)/ha/
@cp $(HARBORPKG)/photon/db/registry.sql $(HARBORPKG)/ha/
@if [ "$(MIGRATORFLAG)" = "true" ] ; then \
echo "pulling DB migrator..."; \
$(DOCKERPULL) vmware/harbor-db-migrator:$(MIGRATORVERSION); \

17 binary image files added (previews not shown; 97-341 KiB each).

111
docs/install_guide_ova.md Normal file
View File

@ -0,0 +1,111 @@
# Deploying Harbor from OVA
**Prerequisites**
- Download the build of the OVA installer from the **[official release](https://github.com/vmware/harbor/releases)** page.
- Import the appliance to a vCenter Server instance. Deploying the appliance directly on an ESXi host is not supported.
- The system requirements are as follows:
- vCenter Server 6.0 or 6.5.
- ESXi 6.0 or 6.5 for all hosts.
- 2 vCPUs or more.
- 8GB RAM or more.
- At least 80GB free disk space on the datastore.
- Ensure that the vCenter user has the following privileges:
- Datastore > Allocate space
- Datastore > Low level file Operations
- Folder > Create Folder
- Folder > Delete Folder
- Network > Assign network
- Resource > Assign virtual machine to resource pool
- Virtual machine > Configuration > Add new disk
- Virtual Machine > Configuration > Add existing disk
- Virtual Machine > Configuration > Add or remove device
- Virtual Machine > Configuration > Change CPU count
- Virtual Machine > Configuration > Change resource
- Virtual Machine > Configuration > Memory
- Virtual Machine > Configuration > Modify device settings
- Virtual Machine > Configuration > Remove disk
- Virtual Machine > Configuration > Rename
- Virtual Machine > Configuration > Settings
- Virtual machine > Configuration > Advanced
- Virtual Machine > Interaction > Power off
- Virtual Machine > Interaction > Power on
- Virtual Machine > Inventory > Create from existing
- Virtual Machine > Inventory > Create new
- Virtual Machine > Inventory > Remove
- Virtual Machine > Provisioning > Clone virtual machine
- Virtual Machine > Provisioning > Customize
- Virtual Machine > Provisioning > Read customization specifications
- vApp > Import
- Profile-driven storage > Profile-driven storage view
- Ensure that all vCenter Server instances and ESXi hosts in the environment in which you are deploying the appliance have network time protocol (NTP) running. Running NTP prevents problems arising from clock skew between Harbor and its clients.
- Use the Flex-based vSphere Web Client to deploy the appliance. You cannot deploy the Harbor OVA file from the HTML5 vSphere Client or from the legacy Windows client.
**Procedure**
1. In the vSphere Web Client, right-click a host in the vCenter Server inventory and select **Deploy OVF template**.
![Screenshot of Deploy OVF template](img/ovainstall/DeployOVFmenu.png)
2. Select the template: navigate to the OVA file or enter the URL of the OVA file in the URL field.
![Screenshot of Import ova](img/ovainstall/importova.png)
3. Follow the installer prompts to perform basic configuration of the appliance and to select the vSphere resources for it to use.
- Accept or modify the appliance name.
- Select the destination datacenter or folder:
![Screenshot of appliance name](img/ovainstall/namelocation.png)
- Select the destination host, cluster, or resource pool:
![Screenshot of resource pool](img/ovainstall/resource.png)
- Select the disk format and the destination datastore:
![Screenshot of datastore](img/ovainstall/datastore.png)
- Select the network that the appliance connects to:
![Screenshot of network](img/ovainstall/network.png)
4. On the **Customize template** page, configure Harbor's SSL certificates. There are two options: an auto-generated certificate or a customized certificate.
- Auto-generated certificate. Leave the CA Certificate, Server Certificate and Server Key fields blank and go to Step 5.
- Customized certificate. To provide your own CA Certificate, Server Certificate and Server Key, copy and paste the full content of those files into the corresponding text boxes. Because the Harbor OVA is launched with a Fully Qualified Domain Name (FQDN), the certificate should be generated for the FQDN of the host, and the host must be configured with the same FQDN in Step 7.
![Screenshot of customize cert](img/ovainstall/custom_cert.png)
5. In the Harbor Configuration section, select the Authentication Mode and set the Administrator Password. If Authentication Mode is set to ldap_auth, the LDAP configuration in the next section is required. **Note:** The ldap_auth mode covers both LDAP servers and Active Directory.
![Screenshot of customizing harbor](img/ovainstall/customizeharbor.png)
If the Authentication Mode is set to ldap_auth, you need to configure Harbor's LDAP settings:
- LDAP Base DN: The base DN to look up users.
- LDAP UID: The attribute to match a user, such as uid, cn, email or other attributes.
- LDAP URL: The URL of the LDAP endpoint.
- Search DN: The DN of a user who has permission to search the LDAP server.
- Search DN Password: The password for search DN.
- Search Filter: The filter to search users.
- Search Scope: The scope to search users.
![Screenshot of customizing LDAP](img/ovainstall/customizeldap.png)
Refer to **[Harbor's Installation Guide](installation_guide.md)** for more information about these settings.
6. On the **Customize template** page, under **System**, set the root password for the appliance VM and the option for **Permit Root Login**.
Setting the root password for the appliance is mandatory.
- If you want to have SSH access to the Harbor appliance for troubleshooting, set **Permit Root Login** to true.
![Screenshot of customizing template system](img/ovainstall/system.png)
7. Expand **Networking Properties** and optionally configure a static IP address for the appliance VM.
- To use DHCP, leave the **Networking Properties** blank.
- If a customized SSL certificate is configured, set the Domain Name to the same FQDN as the certificate from Step 4.
![Screenshot of network detail](img/ovainstall/network2.png)
**IMPORTANT**: If you set a static IP address for the appliance, use spaces to separate DNS servers. Do not use comma separation for DNS servers.
8. When the deployment completes, refresh the current page and power on the appliance VM. It will take several minutes after powering on as it needs to load Docker images. 
![Screenshot of power on](img/ovainstall/poweron.png)
Go to the **Summary** tab of the appliance VM and note the DNS Name.
9. (Optional) If you provided a static network configuration, view the network status of the appliance.
1. In the **Summary** tab of the appliance VM, launch the VM console.
2. In the VM console, press the right arrow key.
The network status shows whether the network settings that you provided during the deployment match the settings with which the appliance is running. If there are mismatches, power off the appliance and select **Edit Settings** > **vApp Options** to correct the network settings.
10. In a browser, go to https://*<DNS Name>*, where *<DNS Name>* is the name noted in Step 8. When prompted, enter the username admin and the administrator password set in Step 5.
![Screenshot of login harbor](img/ovainstall/login.png)
If everything worked properly, you should see the administration console. Refer to **[Harbor User Guide](user_guide.md)** for how to use Harbor.
![Screenshot of after login](img/ovainstall/afterlogin.png)
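A quick way to confirm that the certificate served by the appliance matches the FQDN configured in Step 4 (the hostname below is a placeholder):

```sh
echo | openssl s_client -connect harbor.mydomain.com:443 -servername harbor.mydomain.com 2>/dev/null \
  | openssl x509 -noout -subject -dates
```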

View File

@ -1,10 +1,11 @@
# Installation and Configuration Guide
Harbor can be installed by one of two approaches:
Harbor can be installed by one of three approaches:
- **Online installer:** The installer downloads Harbor's images from Docker hub. For this reason, the installer is very small in size.
- **Offline installer:** Use this installer when the host does not have an Internet connection. The installer contains pre-built images so its size is larger.
- **OVA installer:** Use this installer when you have a vCenter environment; Harbor is launched after the OVA is deployed. For details, refer to the **[Harbor OVA install guide](install_guide_ova.md)**.
All installers can be downloaded from the **[official release](https://github.com/vmware/harbor/releases)** page.

View File

@ -4,7 +4,7 @@ This document describes how to deploy Harbor on Kubernetes. It has been verified
### Prerequisite
* You should have domain knowledge about Kubernetes (Replication Controller, Service, Persistent Volume, Persistent Volume Claim, Config Map).
* You should have domain knowledge about Kubernetes (Deployment, Service, Persistent Volume, Persistent Volume Claim, Config Map, Ingress).
* **Optional**: Load the Docker images onto the worker nodes. *If you skip this step, worker nodes will pull images from Docker Hub when starting the pods.*
* Download the offline installer of Harbor v1.2.0 from the [release](https://github.com/vmware/harbor/releases) page.
* Uncompress the offline installer and get the images tgz file harbor.*.tgz, transfer it to each of the worker nodes.
@ -34,23 +34,8 @@ These Basic Configuration must be set. Otherwise you can't deploy Harbor on Kube
#To accept access from outside of Kubernetes cluster, it should be set to a worker node.
hostname = 10.192.168.5
```
- `make/kubernetes/**/*.svc.yaml`: Specify the service of pods. In particular, the externalIP should be set in `make/kubernetes/nginx/nginx.svc.yaml`:
```yaml
...
metadata:
name: nginx
spec:
ports:
- name: http
port: 80
selector:
name: nginx-apps
externalIPs:
- 10.192.168.5
```
- `make/kubernetes/**/*.rc.yaml`: Specify configs of containers.
- `make/kubernetes/**/*.svc.yaml`: Specify the service of pods.
- `make/kubernetes/**/*.deploy.yaml`: Specify configs of containers.
- `make/kubernetes/pv/*.pvc.yaml`: Persistent Volume Claim.
You can set capacity of storage in these files. example:
@ -91,10 +76,10 @@ These files will be generated:
- make/kubernetes/jobservice/jobservice.cm.yaml
- make/kubernetes/mysql/mysql.cm.yaml
- make/kubernetes/nginx/nginx.cm.yaml
- make/kubernetes/registry/registry.cm.yaml
- make/kubernetes/ui/ui.cm.yaml
- make/kubernetes/adminserver/adminserver.cm.yaml
- make/kubernetes/ingress.yaml
#### Advanced Configuration
If the Basic Configuration does not cover your requirements, read this section for more details.
@ -108,7 +93,7 @@ You can find all configs of Harbor in `make/kubernetes/templates/`. There are sp
- `jobservice.cm.yaml`: ENV and web config of jobservice
- `mysql.cm.yaml`: Root password of MySQL
- `nginx.cm.yaml`: HTTPS certificate and nginx config. If you are familiar with nginx, you can modify it.
- `ingress.yaml`: HTTPS certificate and ingress config. If you are familiar with ingress, you can modify it.
- `registry.cm.yaml`: Token service certification and registry config
The registry uses the filesystem to store image data. You can find it like:
@ -140,7 +125,6 @@ kubectl apply -f make/kubernetes/pv/storage.pvc.yaml
# create config map
kubectl apply -f make/kubernetes/jobservice/jobservice.cm.yaml
kubectl apply -f make/kubernetes/mysql/mysql.cm.yaml
kubectl apply -f make/kubernetes/nginx/nginx.cm.yaml
kubectl apply -f make/kubernetes/registry/registry.cm.yaml
kubectl apply -f make/kubernetes/ui/ui.cm.yaml
kubectl apply -f make/kubernetes/adminserver/adminserver.cm.yaml
@ -148,23 +132,24 @@ kubectl apply -f make/kubernetes/adminserver/adminserver.cm.yaml
# create service
kubectl apply -f make/kubernetes/jobservice/jobservice.svc.yaml
kubectl apply -f make/kubernetes/mysql/mysql.svc.yaml
kubectl apply -f make/kubernetes/nginx/nginx.svc.yaml
kubectl apply -f make/kubernetes/registry/registry.svc.yaml
kubectl apply -f make/kubernetes/ui/ui.svc.yaml
kubectl apply -f make/kubernetes/adminserver/adminserver.svc.yaml
# create k8s rc
kubectl apply -f make/kubernetes/registry/registry.rc.yaml
kubectl apply -f make/kubernetes/mysql/mysql.rc.yaml
kubectl apply -f make/kubernetes/jobservice/jobservice.rc.yaml
kubectl apply -f make/kubernetes/ui/ui.rc.yaml
kubectl apply -f make/kubernetes/nginx/nginx.rc.yaml
kubectl apply -f make/kubernetes/adminserver/adminserver.rc.yaml
# create k8s deployment
kubectl apply -f make/kubernetes/registry/registry.deploy.yaml
kubectl apply -f make/kubernetes/mysql/mysql.deploy.yaml
kubectl apply -f make/kubernetes/jobservice/jobservice.deploy.yaml
kubectl apply -f make/kubernetes/ui/ui.deploy.yaml
kubectl apply -f make/kubernetes/adminserver/adminserver.deploy.yaml
# create k8s ingress
kubectl apply -f make/kubernetes/ingress.yaml
```
After the pods are running, you can access Harbor's UI via the configured endpoint `10.192.168.5` or issue docker commands such as `docker login 10.192.168.5` to interact with the registry.
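As a concrete check, a minimal sketch of pushing an image through that endpoint; the project and image names are placeholders, and because the deployment is HTTP only (see the Limitation section below) the endpoint must first be listed under `insecure-registries` in the Docker client's daemon.json:

```sh
docker login 10.192.168.5 -u admin         # password as configured for Harbor
docker tag busybox 10.192.168.5/library/busybox:latest
docker push 10.192.168.5/library/busybox:latest
```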
#### Limitation
1. Current deployment is http only, to enable https you need to either add another layer of proxy or modify the nginx.cm.yaml to enable https and include a correct certificate
1. The current deployment is HTTP only. To enable HTTPS, either add another layer of proxy or modify ingress.yaml to enable HTTPS and include a correct certificate.
2. The current deployment does not include Clair and Notary, which are supported in the docker-compose deployment. They will be supported in the near future; stay tuned.

View File

@ -1391,6 +1391,32 @@ paths:
description: User need to login first.
'500':
description: Unexpected internal errors.
put:
summary: Update status of jobs. Only stop is supported for now.
description: >
The endpoint is used to stop the replication jobs of a policy.
tags:
- Products
parameters:
- name: policyinfo
in: body
description: The policy ID and status.
required: true
schema:
$ref: '#/definitions/UpdateJobs'
responses:
'200':
description: Update the status successfully.
'400':
description: Bad request because of invalid parameters.
'401':
description: User need to login first.
'403':
description: User has no privilege for the operation.
'404':
description: Resource requested does not exist.
'500':
description: Unexpected internal errors.
/jobs/replication/{id}:
delete:
summary: Delete specific ID job.
@ -2414,9 +2440,22 @@ definitions:
kind:
type: string
description: The replication policy trigger kind. The valid values are manual, immediate and schedule.
param:
schedule_param:
$ref: '#/definitions/ScheduleParam'
ScheduleParam:
type: object
properties:
type:
type: string
description: The replication policy trigger parameters.
description: The schedule type. The valid values are daily and weekly.
weekday:
type: integer
format: int8
description: Optional, only used when the type is weekly. The valid values are 1-7.
offtime:
type: integer
format: int64
description: The time offset with the UTC 00:00 in seconds.
RepFilter:
type: object
properties:
@ -2927,5 +2966,12 @@ definitions:
description:
type: string
description: The description of the repository.
UpdateJobs:
type: object
properties:
policy_id:
type: integer
description: The ID of replication policy
status:
type: string
description: The status of jobs. The only valid value is stop for now.
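A hedged sketch of calling the new endpoint to stop the jobs of a replication policy; the host, credentials and policy ID are placeholders, and the path assumes the API is rooted at `/api` like the other Harbor endpoints:

```sh
curl -u admin:Harbor12345 -X PUT "https://harbor.example.com/api/jobs/replication" \
  -H "Content-Type: application/json" \
  -d '{"policy_id": 1, "status": "stop"}'
```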

View File

@ -11,7 +11,7 @@ LDAP_FILTER=$ldap_filter
LDAP_UID=$ldap_uid
LDAP_SCOPE=$ldap_scope
LDAP_TIMEOUT=$ldap_timeout
LDAP_VERIFY_CERT=true
LDAP_VERIFY_CERT=$ldap_verify_cert
DATABASE_TYPE=mysql
MYSQL_HOST=$db_host
MYSQL_PORT=$db_port
@ -39,10 +39,15 @@ GODEBUG=netdns=cgo
ADMIRAL_URL=$admiral_url
WITH_NOTARY=$with_notary
WITH_CLAIR=$with_clair
CLAIR_DB_PASSWORD=$pg_password
CLAIR_DB_PASSWORD=$clair_db_password
CLAIR_DB_HOST=$clair_db_host
CLAIR_DB_PORT=$clair_db_port
CLAIR_DB_USERNAME=$clair_db_username
CLAIR_DB=$clair_db
RESET=false
UAA_ENDPOINT=$uaa_endpoint
UAA_CLIENTID=$uaa_clientid
UAA_CLIENTSECRET=$uaa_clientsecret
UAA_VERIFY_CERT=$uaa_verify_cert
UI_URL=http://ui:8080
JOBSERVICE_URL=http://jobservice:8080

View File

@ -2,7 +2,7 @@ clair:
database:
type: pgsql
options:
source: postgresql://postgres:$password@postgres:5432?sslmode=disable
source: postgresql://$username:$password@$host:$port?sslmode=disable
# Number of elements kept in the cache
# Values unlikely to change (e.g. namespaces) are cached in order to prevent needless roundtrips to the database.
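The prepare script renders this template with the clair_db_* values from harbor.cfg; a small shell sketch (defaults from this change, password is a placeholder) of the connection string that results:

```sh
username=postgres; password=MyClairPassword; host=postgres; port=5432
echo "source: postgresql://$username:$password@$host:$port?sslmode=disable"
# -> source: postgresql://postgres:MyClairPassword@postgres:5432?sslmode=disable
```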

View File

@ -6,6 +6,12 @@ RUN mkdir -p /harbor_src
COPY src/ui_ng/package.json /harbor_resources
COPY make/dev/nodeclarity/entrypoint.sh /
# Install Chrome
RUN wget -q -O - https://dl.google.com/linux/linux_signing_key.pub | apt-key add -
RUN echo "deb http://dl.google.com/linux/chrome/deb/ stable main" | tee /etc/apt/sources.list.d/google-chrome.list
RUN apt-get update && apt-get -y install google-chrome-stable
# Install npm package
WORKDIR /harbor_resources
RUN npm __proxy__ install -g @angular/cli && \

View File

@ -95,6 +95,9 @@ ldap_scope = 3
#Timeout (in seconds) when connecting to an LDAP Server. The default value (and most reasonable) is 5 seconds.
ldap_timeout = 5
#Verify certificate from LDAP server
ldap_verify_cert = true
#Turn on or off the self-registration feature
self_registration = on
@ -107,7 +110,7 @@ token_expiration = 30
project_creation_restriction = everyone
#The following configurations are for Harbor HA mode only
#####################################################
#the address of the mysql database.
db_host = mysql
@ -118,10 +121,25 @@ db_port = 3306
db_user = root
#The redis server address
redis_url =
#Clair DB host address
clair_db_host = postgres
#Clair DB connect port
clair_db_port = 5432
#Clair DB username
clair_db_username = postgres
#Clair default database
clair_db = postgres
################### end of HA section #####################
#************************END INITIAL PROPERTIES************************
#The following attributes only need to be set when auth mode is uaa_auth
uaa_endpoint = uaa.mydomain.org
uaa_clientid= id
uaa_clientsecret= secret
uaa_ca_root= /path/to/uaa_ca.pem
uaa_clientid = id
uaa_clientsecret = secret
uaa_verify_cert = true
#############

View File

@ -1,13 +1,11 @@
apiVersion: v1
kind: ReplicationController
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: adminserver-rc
name: adminserver
labels:
name: adminserver-rc
name: adminserver
spec:
replicas: 1
selector:
name: adminserver-apps
template:
metadata:
labels:

View File

@ -1,13 +1,11 @@
apiVersion: v1
kind: ReplicationController
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: jobservice-rc
name: jobservice
labels:
name: jobservice-rc
name: jobservice
spec:
replicas: 1
selector:
name: jobservice-apps
template:
metadata:
labels:

View File

@ -210,6 +210,7 @@ output_dir = base_dir
generate_template(os.path.join(template_dir, 'ui.cm.yaml'), os.path.join(output_dir, 'ui/ui.cm.yaml'))
generate_template(os.path.join(template_dir, 'jobservice.cm.yaml'), os.path.join(output_dir, 'jobservice/jobservice.cm.yaml'))
generate_template(os.path.join(template_dir, 'mysql.cm.yaml'), os.path.join(output_dir, 'mysql/mysql.cm.yaml'))
generate_template(os.path.join(template_dir, 'nginx.cm.yaml'), os.path.join(output_dir, 'nginx/nginx.cm.yaml'))
generate_template(os.path.join(template_dir, 'registry.cm.yaml'), os.path.join(output_dir, 'registry/registry.cm.yaml'))
generate_template(os.path.join(template_dir, 'adminserver.cm.yaml'), os.path.join(output_dir, 'adminserver/adminserver.cm.yaml'))
generate_template(os.path.join(template_dir, 'ingress.yaml'), os.path.join(output_dir, 'ingress.yaml'))

View File

@ -1,13 +1,11 @@
apiVersion: v1
kind: ReplicationController
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: mysql-rc
name: mysql
labels:
name: mysql-rc
name: mysql
spec:
replicas: 1
selector:
name: mysql-apps
template:
metadata:
labels:

View File

@ -1,36 +0,0 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: nginx-rc
labels:
name: nginx-rc
spec:
replicas: 1
selector:
name: nginx-apps
template:
metadata:
labels:
name: nginx-apps
spec:
containers:
- name: nginx-app
image: vmware/nginx-photon:1.11.13
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
- containerPort: 443
volumeMounts:
- name: config
mountPath: /etc/nginx
volumes:
- name: config
configMap:
name: harbor-nginx-config
items:
- key: config
path: nginx.conf
- key: pkey
path: https.key
- key: cert
path: https.crt

View File

@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: nginx
spec:
ports:
- name: http
port: 80
selector:
name: nginx-apps
# Set the external IP to an IP of the cluster node, so that the service can be accessed from outside the kubernetes cluster.
# externalIPs:
# - 10.192.168.5

View File

@ -1,13 +1,11 @@
apiVersion: v1
kind: ReplicationController
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: registry-rc
name: registry
labels:
name: registry-rc
name: registry
spec:
replicas: 1
selector:
name: registry-apps
template:
metadata:
labels:
@ -22,7 +20,7 @@ spec:
- containerPort: 5001
volumeMounts:
- name: config
mountPath: /etc/docker/registry
mountPath: /etc/registry
- name: storage
mountPath: /storage
volumes:

View File

@ -0,0 +1,22 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: harbor
spec:
rules:
- host: {{hostname}}
http:
paths:
- path: /
backend:
serviceName: ui
servicePort: 80
- path: /v2
backend:
serviceName: registry
servicePort: repo
- path: /service
backend:
serviceName: ui
servicePort: 80

View File

@ -1,89 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: harbor-nginx-config
data:
config: |
worker_processes auto;
events {
worker_connections 1024;
use epoll;
multi_accept on;
}
http {
tcp_nodelay on;
# this is necessary for us to be able to disable request buffering in all cases
proxy_http_version 1.1;
upstream registry {
server registry:5000;
}
upstream ui {
server ui:80;
}
server {
listen 80;
server_name {{hostname}};
# disable any limits to avoid HTTP 413 for large image uploads
client_max_body_size 0;
# required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486)
chunked_transfer_encoding on;
# rewrite ^/(.*) https://$server_name:443/$1 permanent;
location / {
proxy_pass http://ui/;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
proxy_set_header X-Forwarded-Proto $scheme;
proxy_buffering off;
proxy_request_buffering off;
}
location /v1/ {
return 404;
}
location /v2/ {
proxy_pass http://registry/v2/;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
proxy_set_header X-Forwarded-Proto $scheme;
proxy_buffering off;
proxy_request_buffering off;
}
location /service/ {
proxy_pass http://ui/service/;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# When setting up Harbor behind other proxy, such as an Nginx instance, remove the below line if the proxy already has similar settings.
proxy_set_header X-Forwarded-Proto $scheme;
proxy_buffering off;
proxy_request_buffering off;
}
}
}
pkey: |
{{4 https_pkey}}
cert: |
{{4 https_cert}}

View File

@ -28,7 +28,7 @@ data:
token:
issuer: harbor-token-issuer
realm: {{ui_url}}/service/token
rootcertbundle: /etc/docker/registry/root.crt
rootcertbundle: /etc/registry/root.crt
service: harbor-registry
notifications:
endpoints:

View File

@ -1,13 +1,11 @@
apiVersion: v1
kind: ReplicationController
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: ui-rc
name: ui
labels:
name: ui-rc
name: ui
spec:
replicas: 1
selector:
name: ui-apps
template:
metadata:
labels:

View File

@ -1,7 +1,7 @@
FROM vmware/photon:1.0
RUN tdnf distro-sync -y || echo \
RUN tdnf distro-sync -y \
&& tdnf erase vim -y \
&& tdnf install -y git shadow sudo bzr rpm xz python-xml \
&& tdnf clean all \

View File

@ -1,6 +1,6 @@
FROM vmware/photon:1.0
RUN tdnf distro-sync -y || echo \
RUN tdnf distro-sync -y \
&& tdnf install -y cronie rsyslog logrotate shadow tar gzip sudo net-tools\
&& mkdir /etc/rsyslog.d/ \
&& mkdir /var/spool/rsyslog \

View File

@ -2,7 +2,7 @@ FROM vmware/photon:1.0
#The Docker Daemon has to be running with storage backend btrfs when building the image
RUN tdnf distro-sync -y || echo \
RUN tdnf distro-sync -y \
&& tdnf install -y sed shadow procps-ng gawk gzip sudo net-tools \
&& groupadd -r -g 10000 mysql && useradd --no-log-init -r -g 10000 -u 10000 mysql \
&& tdnf install -y mariadb-server mariadb \

View File

@ -1,6 +1,6 @@
FROM vmware/photon:1.0
RUN tdnf distro-sync -y || echo \
RUN tdnf distro-sync -y \
&& tdnf install -y nginx \
&& ln -sf /dev/stdout /var/log/nginx/access.log \
&& ln -sf /dev/stderr /var/log/nginx/error.log \

View File

@ -1,6 +1,6 @@
FROM vmware/photon:1.0
RUN tdnf distro-sync -y || echo \
RUN tdnf distro-sync -y \
&& tdnf erase vim -y \
&& tdnf install -y shadow sudo \
&& tdnf clean all \

View File

@ -1,6 +1,6 @@
FROM vmware/photon:1.0
RUN tdnf distro-sync -y || echo \
RUN tdnf distro-sync -y \
&& tdnf erase vim -y \
&& tdnf install -y shadow sudo \
&& tdnf clean all \

View File

@ -3,7 +3,7 @@ FROM vmware/photon:1.0
ENV PGDATA /var/lib/postgresql/data
RUN touch /etc/localtime.bak \
&& tdnf distro-sync -y || echo \
&& tdnf distro-sync -y \
&& tdnf install -y sed shadow gzip postgresql\
&& groupadd -r postgres --gid=999 \
&& useradd -r -g postgres --uid=999 postgres \

View File

@ -3,7 +3,7 @@ FROM vmware/photon:1.0
MAINTAINER wangyan@vmware.com
# The original script in the docker official registry image.
RUN tdnf distro-sync -y || echo \
RUN tdnf distro-sync -y \
&& tdnf erase vim -y \
&& tdnf install sudo -y \
&& tdnf clean all \

View File

@ -217,6 +217,7 @@ else:
ldap_uid = rcp.get("configuration", "ldap_uid")
ldap_scope = rcp.get("configuration", "ldap_scope")
ldap_timeout = rcp.get("configuration", "ldap_timeout")
ldap_verify_cert = rcp.get("configuration", "ldap_verify_cert")
db_password = rcp.get("configuration", "db_password")
db_host = rcp.get("configuration", "db_host")
db_user = rcp.get("configuration", "db_user")
@ -234,11 +235,16 @@ if rcp.has_option("configuration", "admiral_url"):
admiral_url = rcp.get("configuration", "admiral_url")
else:
admiral_url = ""
pg_password = rcp.get("configuration", "clair_db_password")
clair_db_password = rcp.get("configuration", "clair_db_password")
clair_db_host = rcp.get("configuration", "clair_db_host")
clair_db_port = rcp.get("configuration", "clair_db_port")
clair_db_username = rcp.get("configuration", "clair_db_username")
clair_db = rcp.get("configuration", "clair_db")
uaa_endpoint = rcp.get("configuration", "uaa_endpoint")
uaa_clientid = rcp.get("configuration", "uaa_clientid")
uaa_clientsecret = rcp.get("configuration", "uaa_clientsecret")
uaa_ca_root = rcp.get("configuration", "uaa_ca_root")
uaa_verify_cert = rcp.get("configuration", "uaa_verify_cert")
secret_key = get_secret_key(secretkey_path)
log_rotate_count = rcp.get("configuration", "log_rotate_count")
@ -292,12 +298,6 @@ else:
render(os.path.join(templates_dir, "nginx", "nginx.http.conf"),
nginx_conf)
if auth_mode == "uaa_auth":
if os.path.isfile(uaa_ca_root):
shutil.copy2(uaa_ca_root, os.path.join(ui_certificates_dir, "uaa_ca.pem"))
else:
raise Exception("Error: Invalid path for uaa ca root: %s" % uaa_ca_root)
render(os.path.join(templates_dir, "adminserver", "env"),
adminserver_conf_env,
ui_url=ui_url,
@ -310,6 +310,7 @@ render(os.path.join(templates_dir, "adminserver", "env"),
ldap_filter=ldap_filter,
ldap_uid=ldap_uid,
ldap_scope=ldap_scope,
ldap_verify_cert=ldap_verify_cert,
ldap_timeout=ldap_timeout,
db_password=db_password,
db_host=db_host,
@ -332,10 +333,15 @@ render(os.path.join(templates_dir, "adminserver", "env"),
admiral_url=admiral_url,
with_notary=args.notary_mode,
with_clair=args.clair_mode,
pg_password=pg_password,
clair_db_password=clair_db_password,
clair_db_host=clair_db_host,
clair_db_port=clair_db_port,
clair_db_username=clair_db_username,
clair_db=clair_db,
uaa_endpoint=uaa_endpoint,
uaa_clientid=uaa_clientid,
uaa_clientsecret=uaa_clientsecret
uaa_clientsecret=uaa_clientsecret,
uaa_verify_cert=uaa_verify_cert
)
render(os.path.join(templates_dir, "ui", "env"),
@ -500,9 +506,14 @@ if args.clair_mode:
shutil.rmtree(os.path.join(clair_config_dir, "postgresql-init.d"))
shutil.copytree(os.path.join(clair_temp_dir, "postgresql-init.d"), os.path.join(clair_config_dir, "postgresql-init.d"))
postgres_env = os.path.join(clair_config_dir, "postgres_env")
render(os.path.join(clair_temp_dir, "postgres_env"), postgres_env, password = pg_password)
render(os.path.join(clair_temp_dir, "postgres_env"), postgres_env, password = clair_db_password)
clair_conf = os.path.join(clair_config_dir, "config.yaml")
render(os.path.join(clair_temp_dir, "config.yaml"), clair_conf, password = pg_password)
render(os.path.join(clair_temp_dir, "config.yaml"),
clair_conf,
password = clair_db_password,
username = clair_db_username,
host = clair_db_host,
port = clair_db_port)
if args.ha_mode:
prepare_ha(rcp, args)

250
src/Gopkg.lock generated Normal file
View File

@ -0,0 +1,250 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
name = "github.com/Sirupsen/logrus"
packages = ["."]
revision = "a283a10442df8dc09befd873fab202bf8a253d6a"
[[projects]]
name = "github.com/Unknwon/goconfig"
packages = ["."]
revision = "5f601ca6ef4d5cea8d52be2f8b3a420ee4b574a5"
[[projects]]
branch = "master"
name = "github.com/agl/ed25519"
packages = [
".",
"edwards25519"
]
revision = "5312a61534124124185d41f09206b9fef1d88403"
[[projects]]
name = "github.com/astaxie/beego"
packages = [
".",
"cache",
"config",
"context",
"grace",
"logs",
"orm",
"session",
"session/redis",
"toolbox",
"utils",
"validation"
]
revision = "1aeb3d90512734def678c7aa9f612fe6f659e6b5"
version = "v1.6.1"
[[projects]]
name = "github.com/beego/i18n"
packages = ["."]
revision = "e87155e8f0c05bf323d0b13470e1b97af0cb5652"
[[projects]]
name = "github.com/davecgh/go-spew"
packages = ["spew"]
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
version = "v1.1.0"
[[projects]]
name = "github.com/dghubble/sling"
packages = ["."]
revision = "eb56e89ac5088bebb12eef3cb4b293300f43608b"
version = "v1.1.0"
[[projects]]
name = "github.com/dgrijalva/jwt-go"
packages = ["."]
revision = "d2709f9f1f31ebcda9651b03077758c1f3a0018c"
version = "v3.0.0"
[[projects]]
name = "github.com/docker/distribution"
packages = [
".",
"context",
"digest",
"manifest",
"manifest/schema1",
"manifest/schema2",
"reference",
"registry/auth",
"registry/auth/token",
"registry/client/auth/challenge",
"uuid"
]
revision = "325b0804fef3a66309d962357aac3c2ce3f4d329"
version = "v2.6.0"
[[projects]]
branch = "master"
name = "github.com/docker/go"
packages = ["canonical/json"]
revision = "d30aec9fd63c35133f8f79c3412ad91a3b08be06"
[[projects]]
branch = "master"
name = "github.com/docker/libtrust"
packages = ["."]
revision = "aabc10ec26b754e797f9028f4589c5b7bd90dc20"
[[projects]]
name = "github.com/docker/notary"
packages = [
".",
"client",
"client/changelist",
"cryptoservice",
"storage",
"trustmanager",
"trustmanager/yubikey",
"trustpinning",
"tuf",
"tuf/data",
"tuf/signed",
"tuf/utils",
"tuf/validation"
]
revision = "c04e3e6d05881045def11167c51d4a8baa34899a"
[[projects]]
name = "github.com/garyburd/redigo"
packages = [
"internal",
"redis"
]
revision = "47dc60e71eed504e3ef8e77ee3c6fe720f3be57f"
version = "v1.3.0"
[[projects]]
name = "github.com/go-sql-driver/mysql"
packages = ["."]
revision = "a732e14c62dde3285440047bba97581bc472ae18"
version = "v1.2"
[[projects]]
name = "github.com/golang/protobuf"
packages = ["proto"]
revision = "130e6b02ab059e7b717a096f397c5b60111cae74"
[[projects]]
branch = "master"
name = "github.com/google/go-querystring"
packages = ["query"]
revision = "53e6ce116135b80d037921a7fdd5138cf32d7a8a"
[[projects]]
name = "github.com/gorilla/context"
packages = ["."]
revision = "aed02d124ae4a0e94fea4541c8effd05bf0c8296"
[[projects]]
name = "github.com/gorilla/handlers"
packages = ["."]
revision = "13d73096a474cac93275c679c7b8a2dc17ddba82"
[[projects]]
name = "github.com/gorilla/mux"
packages = ["."]
revision = "780415097119f6f61c55475fe59b66f3c3e9ea53"
[[projects]]
name = "github.com/lib/pq"
packages = [
".",
"oid"
]
revision = "dd1fe2071026ce53f36a39112e645b4d4f5793a4"
[[projects]]
name = "github.com/mattn/go-sqlite3"
packages = ["."]
revision = "3fb7a0e792edd47bf0cf1e919dfc14e2be412e15"
[[projects]]
name = "github.com/miekg/pkcs11"
packages = ["."]
revision = "7283ca79f35edb89bc1b4ecae7f86a3680ce737f"
[[projects]]
name = "github.com/opencontainers/go-digest"
packages = ["."]
revision = "aa2ec055abd10d26d539eb630a92241b781ce4bc"
version = "v1.0.0-rc0"
[[projects]]
name = "github.com/pmezard/go-difflib"
packages = ["difflib"]
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
[[projects]]
name = "github.com/stretchr/testify"
packages = [
"assert",
"require"
]
revision = "4d4bfba8f1d1027c4fdbe371823030df51419987"
[[projects]]
name = "golang.org/x/crypto"
packages = ["pbkdf2"]
revision = "5f961cd492ac9d43fc33a8ef646bae79d113fd97"
[[projects]]
name = "golang.org/x/net"
packages = [
"context",
"context/ctxhttp"
]
revision = "075e191f18186a8ff2becaf64478e30f4545cdad"
[[projects]]
name = "golang.org/x/oauth2"
packages = [
".",
"internal"
]
revision = "bb50c06baba3d0c76f9d125c0719093e315b5b44"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = ["unix"]
revision = "571f7bbbe08da2a8955aed9d4db316e78630e9a3"
[[projects]]
name = "google.golang.org/appengine"
packages = [
"internal",
"internal/base",
"internal/datastore",
"internal/log",
"internal/remote_api",
"internal/urlfetch",
"urlfetch"
]
revision = "24e4144ec923c2374f6b06610c0df16a9222c3d9"
[[projects]]
name = "gopkg.in/asn1-ber.v1"
packages = ["."]
revision = "4e86f4367175e39f69d9358a5f17b4dda270378d"
version = "v1.1"
[[projects]]
name = "gopkg.in/ldap.v2"
packages = ["."]
revision = "8168ee085ee43257585e50c6441aadf54ecb2c9f"
version = "v2.5.0"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "9c0f8cd26043afa12693fed0005998c7194c4ea77c8dae19c4363ebadfd600ef"
solver-name = "gps-cdcl"
solver-version = 1

53
src/Gopkg.toml Normal file
View File

@ -0,0 +1,53 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
[[constraint]]
name = "github.com/astaxie/beego"
version = "1.6.1"
[[constraint]]
name = "github.com/dghubble/sling"
version = "1.1.0"
[[constraint]]
name = "github.com/dgrijalva/jwt-go"
version = "3.0.0"
[[constraint]]
name = "github.com/docker/distribution"
version = "2.6.0"
[[constraint]]
branch = "master"
name = "github.com/docker/libtrust"
[[constraint]]
name = "github.com/go-sql-driver/mysql"
version = "1.2.0"
[[constraint]]
name = "github.com/opencontainers/go-digest"
version = "1.0.0-rc0"
[[constraint]]
name = "gopkg.in/ldap.v2"
version = "2.5.0"

View File

@ -15,36 +15,40 @@
package database
import (
"github.com/vmware/harbor/src/common/dao"
"github.com/vmware/harbor/src/common/models"
"fmt"
"github.com/vmware/harbor/src/adminserver/systemcfg/store"
"github.com/vmware/harbor/src/common"
"fmt"
"github.com/vmware/harbor/src/common/dao"
"github.com/vmware/harbor/src/common/models"
"strconv"
)
const (
name = "database"
)
var(
)
var (
numKeys = map[string]bool{
common.EmailPort:true,
common.LDAPScope:true,
common.LDAPTimeout:true,
common.TokenExpiration:true,
common.MySQLPort:true,
common.MaxJobWorkers:true,
common.CfgExpiration:true,
}
boolKeys = map[string]bool{
common.WithClair:true,
common.WithNotary:true,
common.SelfRegistration:true,
common.EmailSSL:true,
common.EmailInsecure:true,
common.LDAPVerifyCert:true,
common.EmailPort: true,
common.LDAPScope: true,
common.LDAPTimeout: true,
common.TokenExpiration: true,
common.MySQLPort: true,
common.MaxJobWorkers: true,
common.CfgExpiration: true,
common.ClairDBPort: true,
}
)
boolKeys = map[string]bool{
common.WithClair: true,
common.WithNotary: true,
common.SelfRegistration: true,
common.EmailSSL: true,
common.EmailInsecure: true,
common.LDAPVerifyCert: true,
common.UAAVerifyCert: true,
}
)
type cfgStore struct {
name string
}
@ -55,14 +59,15 @@ func (c *cfgStore) Name() string {
}
// NewCfgStore New a cfg store for database driver
func NewCfgStore() (store.Driver, error){
func NewCfgStore() (store.Driver, error) {
return &cfgStore{
name: name,
}, nil
}
// Read configuration from database
func (c *cfgStore) Read() (map[string]interface{}, error) {
configEntries,error := dao.GetConfigEntries()
configEntries, error := dao.GetConfigEntries()
if error != nil {
return nil, error
}
@ -70,53 +75,54 @@ func (c *cfgStore) Read() (map[string]interface{}, error) {
}
// WrapperConfig Wrapper the configuration
func WrapperConfig (configEntries []*models.ConfigEntry) (map[string]interface{}, error) {
func WrapperConfig(configEntries []*models.ConfigEntry) (map[string]interface{}, error) {
config := make(map[string]interface{})
for _,entry := range configEntries{
if numKeys[entry.Key]{
for _, entry := range configEntries {
if numKeys[entry.Key] {
strvalue, err := strconv.Atoi(entry.Value)
if err != nil {
return nil, err
}
config[entry.Key] = float64(strvalue)
}else if boolKeys[entry.Key] {
} else if boolKeys[entry.Key] {
strvalue, err := strconv.ParseBool(entry.Value)
if err != nil {
return nil, err
}
config[entry.Key]=strvalue
}else{
config[entry.Key] = strvalue
} else {
config[entry.Key] = entry.Value
}
}
return config, nil
}
// Write save configuration to database
func (c *cfgStore) Write(config map[string]interface{}) error {
configEntries ,_:= TranslateConfig(config)
configEntries, _ := TranslateConfig(config)
return dao.SaveConfigEntries(configEntries)
}
// TranslateConfig Translate configuration from int, bool, float64 to string
func TranslateConfig(config map[string]interface{}) ([]models.ConfigEntry,error) {
func TranslateConfig(config map[string]interface{}) ([]models.ConfigEntry, error) {
var configEntries []models.ConfigEntry
for k, v := range config {
var entry = new(models.ConfigEntry)
entry.Key = k
switch v.(type) {
case string:
entry.Value=v.(string)
entry.Value = v.(string)
case int:
entry.Value=strconv.Itoa(v.(int))
entry.Value = strconv.Itoa(v.(int))
case bool:
entry.Value=strconv.FormatBool(v.(bool))
entry.Value = strconv.FormatBool(v.(bool))
case float64:
entry.Value=strconv.Itoa(int(v.(float64)))
entry.Value = strconv.Itoa(int(v.(float64)))
default:
return nil, fmt.Errorf("unknown type %v", v)
}
configEntries = append(configEntries,*entry)
configEntries = append(configEntries, *entry)
}
return configEntries,nil
return configEntries, nil
}

View File

@ -22,14 +22,14 @@ import (
enpt "github.com/vmware/harbor/src/adminserver/systemcfg/encrypt"
"github.com/vmware/harbor/src/adminserver/systemcfg/store"
"github.com/vmware/harbor/src/adminserver/systemcfg/store/database"
"github.com/vmware/harbor/src/adminserver/systemcfg/store/encrypt"
"github.com/vmware/harbor/src/adminserver/systemcfg/store/json"
"github.com/vmware/harbor/src/common"
comcfg "github.com/vmware/harbor/src/common/config"
"github.com/vmware/harbor/src/common/utils/log"
"github.com/vmware/harbor/src/adminserver/systemcfg/store/database"
"github.com/vmware/harbor/src/common/models"
"github.com/vmware/harbor/src/common/dao"
"github.com/vmware/harbor/src/adminserver/systemcfg/store/json"
"github.com/vmware/harbor/src/common/models"
"github.com/vmware/harbor/src/common/utils/log"
)
const (
@ -130,11 +130,19 @@ var (
parse: parseStringToBool,
},
common.ClairDBPassword: "CLAIR_DB_PASSWORD",
common.ClairDB: "CLAIR_DB",
common.ClairDBUsername: "CLAIR_DB_USERNAME",
common.ClairDBHost: "CLAIR_DB_HOST",
common.ClairDBPort: "CLAIR_DB_PORT",
common.UAAEndpoint: "UAA_ENDPOINT",
common.UAAClientID: "UAA_CLIENTID",
common.UAAClientSecret: "UAA_CLIENTSECRET",
common.UIURL: "UI_URL",
common.JobServiceURL: "JOBSERVICE_URL",
common.UAAVerifyCert: &parser{
env: "UAA_VERIFY_CERT",
parse: parseStringToBool,
},
common.UIURL: "UI_URL",
common.JobServiceURL: "JOBSERVICE_URL",
}
// configurations need read from environment variables
@ -163,6 +171,10 @@ var (
common.UAAEndpoint: "UAA_ENDPOINT",
common.UAAClientID: "UAA_CLIENTID",
common.UAAClientSecret: "UAA_CLIENTSECRET",
common.UAAVerifyCert: &parser{
env: "UAA_VERIFY_CERT",
parse: parseStringToBool,
},
}
)
@ -262,7 +274,7 @@ func initCfgStore() (err error) {
}
err = CfgStore.Write(jsonconfig)
if err != nil {
log.Error("Failed to update old configuration to dattabase")
log.Error("Failed to update old configuration to database")
return err
}
}
@ -327,7 +339,7 @@ func LoadFromEnv(cfgs map[string]interface{}, all bool) error {
}
// GetDatabaseFromCfg Create database object from config
func GetDatabaseFromCfg(cfg map[string]interface{}) (*models.Database){
func GetDatabaseFromCfg(cfg map[string]interface{}) *models.Database {
database := &models.Database{}
database.Type = cfg[common.DatabaseType].(string)
mysql := &models.MySQL{}

View File

@ -21,11 +21,8 @@ import (
"strconv"
"github.com/astaxie/beego/validation"
"github.com/vmware/harbor/src/common/dao"
"github.com/vmware/harbor/src/common/models"
http_error "github.com/vmware/harbor/src/common/utils/error"
"github.com/vmware/harbor/src/common/utils/log"
"github.com/vmware/harbor/src/ui/auth"
"github.com/astaxie/beego"
)
@ -122,7 +119,7 @@ func (b *BaseAPI) DecodeJSONReq(v interface{}) {
err := json.Unmarshal(b.Ctx.Input.CopyBody(1<<32), v)
if err != nil {
log.Errorf("Error while decoding the json request, error: %v, %v",
err, string(b.Ctx.Input.CopyBody(1<<32)[:]))
err, string(b.Ctx.Input.CopyBody(1 << 32)[:]))
b.CustomAbort(http.StatusBadRequest, "Invalid json request")
}
}
@ -151,59 +148,6 @@ func (b *BaseAPI) DecodeJSONReqAndValidate(v interface{}) {
b.Validate(v)
}
// ValidateUser checks if the request triggered by a valid user
// TODO remove
func (b *BaseAPI) ValidateUser() int {
userID, needsCheck, ok := b.GetUserIDForRequest()
if !ok {
log.Warning("No user id in session, canceling request")
b.CustomAbort(http.StatusUnauthorized, "")
}
if needsCheck {
u, err := dao.GetUser(models.User{UserID: userID})
if err != nil {
log.Errorf("Error occurred in GetUser, error: %v", err)
b.CustomAbort(http.StatusInternalServerError, "Internal error.")
}
if u == nil {
log.Warningf("User was deleted already, user id: %d, canceling request.", userID)
b.CustomAbort(http.StatusUnauthorized, "")
}
}
return userID
}
// GetUserIDForRequest tries to get user ID from basic auth header and session.
// It returns the user ID, whether need further verification(when the id is from session) and if the action is successful
// TODO remove
func (b *BaseAPI) GetUserIDForRequest() (int, bool, bool) {
username, password, ok := b.Ctx.Request.BasicAuth()
if ok {
log.Infof("Requst with Basic Authentication header, username: %s", username)
user, err := auth.Login(models.AuthModel{
Principal: username,
Password: password,
})
if err != nil {
log.Errorf("Error while trying to login, username: %s, error: %v", username, err)
user = nil
}
if user != nil {
b.SetSession("userId", user.UserID)
b.SetSession("username", user.Username)
// User login successfully no further check required.
return user.UserID, false, true
}
}
sessionUserID, ok := b.GetSession("userId").(int)
if ok {
// The ID is from session
return sessionUserID, true, true
}
log.Debug("No valid user id in session.")
return 0, false, false
}
// Redirect does redirection to resource URI with http header status code.
func (b *BaseAPI) Redirect(statusCode int, resouceID string) {
requestURI := b.Ctx.Request.RequestURI

View File

@ -18,6 +18,7 @@ package common
const (
DBAuth = "db_auth"
LDAPAuth = "ldap_auth"
UAAAuth = "uaa_auth"
ProCrtRestrEveryone = "everyone"
ProCrtRestrAdmOnly = "adminonly"
LDAPScopeBase = 1
@ -70,10 +71,15 @@ const (
WithClair = "with_clair"
ScanAllPolicy = "scan_all_policy"
ClairDBPassword = "clair_db_password"
ClairDBHost = "clair_db_host"
ClairDBPort = "clair_db_port"
ClairDB = "clair_db"
ClairDBUsername = "clair_db_username"
UAAEndpoint = "uaa_endpoint"
UAAClientID = "uaa_client_id"
UAAClientSecret = "uaa_client_secret"
DefaultClairEndpoint = "http://clair:6060"
UAAVerifyCert = "uaa_verify_cert"
DefaultClairEndpoint = "http://clair:6060"
CfgDriverDB = "db"
CfgDriverJSON = "json"
)

View File

@ -43,20 +43,20 @@ type Database interface {
}
// InitClairDB ...
func InitClairDB(password string) error {
func InitClairDB(clairDB *models.PostGreSQL) error {
//Except for password other information will not be configurable, so keep it hard coded for 1.2.0.
p := &pgsql{
host: "postgres",
port: 5432,
usr: "postgres",
pwd: password,
database: "postgres",
host: clairDB.Host,
port: clairDB.Port,
usr: clairDB.Username,
pwd: clairDB.Password,
database: clairDB.Database,
sslmode: false,
}
if err := p.Register(ClairDBAlias); err != nil {
return err
}
log.Info("initialized clair databas")
log.Info("initialized clair database")
return nil
}
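The new signature lets the Clair connection be driven by configuration instead of hard-coded values. A hedged caller sketch follows; it is not taken from this commit, the literal connection values are placeholders, and stdlib "log" is assumed.

// Illustrative sketch: models and dao are the packages already imported in this diff.
clairDB := &models.PostGreSQL{
	Host:     "postgres",
	Port:     5432,
	Username: "postgres",
	Password: "password",
	Database: "postgres",
}
if err := dao.InitClairDB(clairDB); err != nil {
	log.Fatalf("failed to initialize the Clair database: %v", err)
}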
@ -116,6 +116,12 @@ func GetOrmer() orm.Ormer {
func ClearTable(table string) error {
o := GetOrmer()
sql := fmt.Sprintf("delete from %s where 1=1", table)
if table == models.ProjectTable {
sql = fmt.Sprintf("delete from %s where project_id > 1", table)
}
if table == models.UserTable {
sql = fmt.Sprintf("delete from %s where user_id > 2", table)
}
_, err := o.Raw(sql).Exec()
return err
}

View File

@ -1067,7 +1067,7 @@ func TestAddRepJob(t *testing.T) {
func TestUpdateRepJobStatus(t *testing.T) {
err := UpdateRepJobStatus(jobID, models.JobFinished)
if err != nil {
t.Errorf("Error occured in UpdateRepJobStatus, error: %v, id: %d", err, jobID)
t.Errorf("Error occurred in UpdateRepJobStatus, error: %v, id: %d", err, jobID)
return
}
j, err := GetRepJob(jobID)
@ -1082,7 +1082,7 @@ func TestUpdateRepJobStatus(t *testing.T) {
}
err = UpdateRepJobStatus(jobID, models.JobPending)
if err != nil {
t.Errorf("Error occured in UpdateRepJobStatus when update it back to status pending, error: %v, id: %d", err, jobID)
t.Errorf("Error occurred in UpdateRepJobStatus when update it back to status pending, error: %v, id: %d", err, jobID)
return
}
}
@ -1090,7 +1090,7 @@ func TestUpdateRepJobStatus(t *testing.T) {
func TestGetRepPolicyByProject(t *testing.T) {
p1, err := GetRepPolicyByProject(99)
if err != nil {
t.Errorf("Error occured in GetRepPolicyByProject:%v, project ID: %d", err, 99)
t.Errorf("Error occurred in GetRepPolicyByProject:%v, project ID: %d", err, 99)
return
}
if len(p1) > 0 {
@ -1116,7 +1116,7 @@ func TestGetRepPolicyByProject(t *testing.T) {
func TestGetRepJobByPolicy(t *testing.T) {
jobs, err := GetRepJobByPolicy(999)
if err != nil {
t.Errorf("Error occured in GetRepJobByPolicy: %v, policy ID: %d", err, 999)
t.Errorf("Error occurred in GetRepJobByPolicy: %v, policy ID: %d", err, 999)
return
}
if len(jobs) > 0 {
@ -1125,7 +1125,7 @@ func TestGetRepJobByPolicy(t *testing.T) {
}
jobs, err = GetRepJobByPolicy(policyID)
if err != nil {
t.Errorf("Error occured in GetRepJobByPolicy: %v, policy ID: %d", err, policyID)
t.Errorf("Error occurred in GetRepJobByPolicy: %v, policy ID: %d", err, policyID)
return
}
if len(jobs) != 1 {
@ -1141,7 +1141,7 @@ func TestGetRepJobByPolicy(t *testing.T) {
func TestFilterRepJobs(t *testing.T) {
jobs, _, err := FilterRepJobs(policyID, "", "", nil, nil, 1000, 0)
if err != nil {
t.Errorf("Error occured in FilterRepJobs: %v, policy ID: %d", err, policyID)
t.Errorf("Error occurred in FilterRepJobs: %v, policy ID: %d", err, policyID)
return
}
if len(jobs) != 1 {
@ -1157,13 +1157,13 @@ func TestFilterRepJobs(t *testing.T) {
func TestDeleteRepJob(t *testing.T) {
err := DeleteRepJob(jobID)
if err != nil {
t.Errorf("Error occured in DeleteRepJob: %v, id: %d", err, jobID)
t.Errorf("Error occurred in DeleteRepJob: %v, id: %d", err, jobID)
return
}
t.Logf("deleted rep job, id: %d", jobID)
j, err := GetRepJob(jobID)
if err != nil {
t.Errorf("Error occured in GetRepJob:%v", err)
t.Errorf("Error occurred in GetRepJob:%v", err)
return
}
if j != nil {
@ -1226,7 +1226,7 @@ func TestGetRepoJobToStop(t *testing.T) {
func TestDeleteRepTarget(t *testing.T) {
err := DeleteRepTarget(targetID)
if err != nil {
t.Errorf("Error occured in DeleteRepTarget: %v, id: %d", err, targetID)
t.Errorf("Error occurred in DeleteRepTarget: %v, id: %d", err, targetID)
return
}
t.Logf("deleted target, id: %d", targetID)
@ -1259,13 +1259,13 @@ func TestUpdateRepPolicy(t *testing.T) {
func TestDeleteRepPolicy(t *testing.T) {
err := DeleteRepPolicy(policyID)
if err != nil {
t.Errorf("Error occured in DeleteRepPolicy: %v, id: %d", err, policyID)
t.Errorf("Error occurred in DeleteRepPolicy: %v, id: %d", err, policyID)
return
}
t.Logf("delete rep policy, id: %d", policyID)
p, err := GetRepPolicy(policyID)
if err != nil && err != orm.ErrNoRows {
t.Errorf("Error occured in GetRepPolicy:%v", err)
t.Errorf("Error occurred in GetRepPolicy:%v", err)
}
if p != nil && p.Deleted != 1 {
t.Errorf("Able to find rep policy after deletion, id: %d", policyID)

View File

@ -241,7 +241,7 @@ func GetRepPolicyByProjectAndTarget(projectID, targetID int64) ([]*models.RepPol
func UpdateRepPolicy(policy *models.RepPolicy) error {
o := GetOrmer()
policy.UpdateTime = time.Now()
_, err := o.Update(policy, "TargetID", "Name", "Description",
_, err := o.Update(policy, "ProjectID", "TargetID", "Name", "Description",
"Trigger", "Filters", "ReplicateDeletion", "UpdateTime")
return err
}

View File

@ -167,7 +167,7 @@ func UpdateImgScanOverview(digest, detailsKey string, sev models.Severity, compO
return nil
}
// ListImgScanOverviews list all records in table img_scan_overview, it is called in notificaiton handler when it needs to refresh the severity of all images.
// ListImgScanOverviews list all records in table img_scan_overview, it is called in notification handler when it needs to refresh the severity of all images.
func ListImgScanOverviews() ([]*models.ImgScanOverview, error) {
var res []*models.ImgScanOverview
o := GetOrmer()

View File

@ -57,6 +57,15 @@ type SQLite struct {
File string `json:"file"`
}
// PostGreSQL ...
type PostGreSQL struct {
Host string `json:"host"`
Port int `json:"port"`
Username string `json:"username"`
Password string `json:"password,omitempty"`
Database string `json:"database"`
}
// Email ...
type Email struct {
Host string `json:"host"`

View File

@ -19,6 +19,9 @@ import (
"time"
)
// ProjectTable is the table name for project
const ProjectTable = "project"
// Project holds the details of a project.
type Project struct {
ProjectID int64 `orm:"pk;auto;column(project_id)" json:"project_id"`
@ -174,3 +177,8 @@ type ProjectQueryResult struct {
Total int64
Projects []*Project
}
//TableName is required by beego orm to map Project to table project
func (p *Project) TableName() string {
return ProjectTable
}
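With TableName in place, beego orm resolves the table for this model by name rather than by struct-name convention. A tiny hedged sketch (not from this commit; assumes the model has been registered via orm.RegisterModel and that the beego orm and stdlib log packages are imported):

// Illustrative sketch only.
o := orm.NewOrm()
count, err := o.QueryTable(&models.Project{}).Count() // runs against the "project" table
if err == nil {
	log.Printf("projects: %d", count)
}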

View File

@ -19,5 +19,5 @@ type UAASettings struct {
Endpoint string
ClientID string
ClientSecret string
CARootPath string
VerifyCert bool
}

View File

@ -18,6 +18,9 @@ import (
"time"
)
// UserTable is the name of table in DB that holds the user object
const UserTable = "user"
// User holds the details of a user.
type User struct {
UserID int `orm:"pk;auto;column(user_id)" json:"user_id"`
@ -45,3 +48,8 @@ type UserQuery struct {
Email string
Pagination *Pagination
}
// TableName ...
func (u *User) TableName() string {
return UserTable
}

View File

@ -31,7 +31,7 @@ type HandlerChannel struct {
//To indicate how many handler instances bound with this chan.
boundCount uint32
//The chan for controling concurrent executions.
//The chan for controlling concurrent executions.
channel chan bool
}
@ -199,7 +199,7 @@ func (nw *NotificationWatcher) Notify(notification Notification) error {
}()
if err := hd.Handle(notification.Value); err != nil {
//Currently, we just log the error
log.Errorf("Error occurred when triggerring handler %s of topic %s: %s\n", reflect.TypeOf(hd).String(), notification.Topic, err.Error())
log.Errorf("Error occurred when triggering handler %s of topic %s: %s\n", reflect.TypeOf(hd).String(), notification.Topic, err.Error())
} else {
log.Infof("Handle notification with topic '%s': %#v\n", notification.Topic, notification.Value)
}

View File

@ -93,7 +93,7 @@ func (alp *AlternatePolicy) Done() <-chan bool {
//AttachTasks is an implementation of same method in policy interface.
func (alp *AlternatePolicy) AttachTasks(tasks ...task.Task) error {
if tasks == nil || len(tasks) == 0 {
if len(tasks) == 0 {
return errors.New("No tasks can be attached")
}

View File

@ -353,7 +353,7 @@ func (session *Session) createUserFilter(username string) string {
if username == "" {
filterTag = "*"
} else {
filterTag = username
filterTag = goldap.EscapeFilter(username)
}
ldapFilter := session.ldapConfig.LdapFilter

View File

@ -60,9 +60,15 @@ var adminServerDefaultConfig = map[string]interface{}{
common.AdmiralEndpoint: "http://www.vmware.com",
common.WithNotary: false,
common.WithClair: false,
common.ClairDBUsername: "postgres",
common.ClairDBHost: "postgres",
common.ClairDB: "postgres",
common.ClairDBPort: 5432,
common.ClairDBPassword: "password",
common.UAAClientID: "testid",
common.UAAClientSecret: "testsecret",
common.UAAEndpoint: "10.192.168.5",
common.UAAVerifyCert: false,
common.UIURL: "http://myui:8888/",
common.JobServiceURL: "http://myjob:8888/",
}

View File

@ -19,12 +19,25 @@ import (
"crypto/tls"
"crypto/x509"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strings"
"github.com/vmware/harbor/src/common/utils/log"
"golang.org/x/oauth2"
"golang.org/x/oauth2/clientcredentials"
)
const (
//TokenURLSuffix ...
TokenURLSuffix = "/oauth/token"
//AuthURLSuffix ...
AuthURLSuffix = "/oauth/authorize"
//UserInfoURLSuffix ...
UserInfoURLSuffix = "/userinfo"
//UsersURLSuffix ...
UsersURLSuffix = "/Users"
)
// Client provides funcs to interact with UAA.
@ -33,6 +46,8 @@ type Client interface {
PasswordAuth(username, password string) (*oauth2.Token, error)
//GetUserInfoByToken send the token to OIDC endpoint to get user info, currently it's also used to validate the token.
GetUserInfo(token string) (*UserInfo, error)
//SearchUser searches a user based on user name.
SearchUser(name string) ([]*SearchUserEntry, error)
}
// ClientConfig values to initialize UAA Client
@ -56,21 +71,43 @@ type UserInfo struct {
Email string `json:"email"`
}
//SearchUserEmailEntry ...
type SearchUserEmailEntry struct {
Value string `json:"value"`
Primary bool `json:"primary"`
}
//SearchUserEntry is the struct of an entry of user within search result.
type SearchUserEntry struct {
ID string `json:"id"`
ExtID string `json:"externalId"`
UserName string `json:"userName"`
Emails []SearchUserEmailEntry `json:"emails"`
Groups []interface{}
}
//SearchUserRes is the struct to parse the result of search user API of UAA
type SearchUserRes struct {
Resources []*SearchUserEntry `json:"resources"`
TotalResults int `json:"totalResults"`
Schemas []string `json:"schemas"`
}
// DefaultClient leverages oauth2 pacakge for oauth features
type defaultClient struct {
httpClient *http.Client
oauth2Cfg *oauth2.Config
twoLegCfg *clientcredentials.Config
endpoint string
//TODO: add public key, etc...
}
func (dc *defaultClient) PasswordAuth(username, password string) (*oauth2.Token, error) {
ctx := context.WithValue(context.Background(), oauth2.HTTPClient, dc.httpClient)
return dc.oauth2Cfg.PasswordCredentialsToken(ctx, username, password)
return dc.oauth2Cfg.PasswordCredentialsToken(dc.prepareCtx(), username, password)
}
func (dc *defaultClient) GetUserInfo(token string) (*UserInfo, error) {
userInfoURL := dc.endpoint + "/uaa/userinfo"
userInfoURL := dc.endpoint + UserInfoURLSuffix
req, err := http.NewRequest(http.MethodGet, userInfoURL, nil)
if err != nil {
return nil, err
@ -92,6 +129,45 @@ func (dc *defaultClient) GetUserInfo(token string) (*UserInfo, error) {
return info, nil
}
func (dc *defaultClient) SearchUser(username string) ([]*SearchUserEntry, error) {
token, err := dc.twoLegCfg.Token(dc.prepareCtx())
if err != nil {
return nil, err
}
url := dc.endpoint + UsersURLSuffix
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return nil, err
}
q := req.URL.Query()
q.Add("filter", fmt.Sprintf("Username eq '%s'", username))
req.URL.RawQuery = q.Encode()
token.SetAuthHeader(req)
log.Debugf("request URL: %s", req.URL)
resp, err := dc.httpClient.Do(req)
if err != nil {
return nil, err
}
bytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("Unexpected status code for searching user in UAA: %d, response: %s", resp.StatusCode, string(bytes))
}
res := &SearchUserRes{}
if err := json.Unmarshal(bytes, res); err != nil {
return nil, err
}
return res.Resources, nil
}
func (dc *defaultClient) prepareCtx() context.Context {
return context.WithValue(context.Background(), oauth2.HTTPClient, dc.httpClient)
}
// NewDefaultClient creates an instance of defaultClient.
func NewDefaultClient(cfg *ClientConfig) (Client, error) {
url := cfg.Endpoint
@ -125,14 +201,21 @@ func NewDefaultClient(cfg *ClientConfig) (Client, error) {
ClientID: cfg.ClientID,
ClientSecret: cfg.ClientSecret,
Endpoint: oauth2.Endpoint{
TokenURL: url + "/uaa/oauth/token",
AuthURL: url + "/uaa/oauth/authorize",
TokenURL: url + TokenURLSuffix,
AuthURL: url + AuthURLSuffix,
},
}
cc := &clientcredentials.Config{
ClientID: cfg.ClientID,
ClientSecret: cfg.ClientSecret,
TokenURL: url + TokenURLSuffix,
}
return &defaultClient{
httpClient: hc,
oauth2Cfg: oc,
twoLegCfg: cc,
endpoint: url,
}, nil
}
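For orientation, a hedged end-to-end sketch of the expanded client follows. It is not part of the diff; the endpoint, credentials, and username are placeholders, and stdlib fmt/log are assumed.

// Illustrative sketch: uaa refers to github.com/vmware/harbor/src/common/utils/uaa.
cfg := &uaa.ClientConfig{
	ClientID:      "harbor",
	ClientSecret:  "secret",
	Endpoint:      "https://uaa.example.com",
	SkipTLSVerify: false,
}
c, err := uaa.NewDefaultClient(cfg)
if err != nil {
	log.Fatalf("failed to build UAA client: %v", err)
}
// SearchUser fetches a client_credentials token internally before calling /Users.
users, err := c.SearchUser("jdoe")
if err != nil {
	log.Fatalf("search failed: %v", err)
}
for _, u := range users {
	fmt.Printf("found %s (id=%s)\n", u.UserName, u.ID)
}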

View File

@ -30,15 +30,18 @@ func TestMain(m *testing.M) {
}
}
func TestPasswordAuth(t *testing.T) {
cfg := &ClientConfig{
func getCfg() *ClientConfig {
return &ClientConfig{
ClientID: "uaa",
ClientSecret: "secret",
Endpoint: mockUAAServer.URL,
SkipTLSVerify: true,
}
}
func TestPasswordAuth(t *testing.T) {
assert := assert.New(t)
client, err := NewDefaultClient(cfg)
client, err := NewDefaultClient(getCfg())
assert.Nil(err)
_, err = client.PasswordAuth("user1", "pass1")
assert.Nil(err)
@ -47,14 +50,8 @@ func TestPasswordAuth(t *testing.T) {
}
func TestUserInfo(t *testing.T) {
cfg := &ClientConfig{
ClientID: "uaa",
ClientSecret: "secret",
Endpoint: mockUAAServer.URL,
SkipTLSVerify: true,
}
assert := assert.New(t)
client, err := NewDefaultClient(cfg)
client, err := NewDefaultClient(getCfg())
assert.Nil(err)
token, err := ioutil.ReadFile(path.Join(currPath(), "test", "./good-access-token.txt"))
if err != nil {
@ -68,6 +65,21 @@ func TestUserInfo(t *testing.T) {
assert.NotNil(err2)
}
func TestSearchUser(t *testing.T) {
assert := assert.New(t)
client, err := NewDefaultClient(getCfg())
assert.Nil(err)
res1, err := client.SearchUser("one")
assert.Nil(err)
assert.Equal(1, len(res1))
if len(res1) == 1 {
assert.Equal("one", res1[0].UserName)
}
res2, err := client.SearchUser("none")
assert.Nil(err)
assert.Equal(0, len(res2))
}
func currPath() string {
_, f, _, ok := runtime.Caller(0)
if !ok {

View File

@ -37,3 +37,40 @@ func (fc *FakeClient) PasswordAuth(username, password string) (*oauth2.Token, er
func (fc *FakeClient) GetUserInfo(token string) (*UserInfo, error) {
return nil, nil
}
// SearchUser ...
func (fc *FakeClient) SearchUser(name string) ([]*SearchUserEntry, error) {
res := []*SearchUserEntry{}
entryOne := &SearchUserEntry{
ExtID: "some-external-id-1",
ID: "u-0001",
UserName: "one",
Emails: []SearchUserEmailEntry{SearchUserEmailEntry{
Primary: false,
Value: "one@email.com",
}},
}
entryTwoA := &SearchUserEntry{
ExtID: "some-external-id-2-a",
ID: "u-0002a",
UserName: "two",
Emails: []SearchUserEmailEntry{SearchUserEmailEntry{
Primary: false,
Value: "two@email.com",
}},
}
entryTwoB := &SearchUserEntry{
ExtID: "some-external-id-2-b",
ID: "u-0002b",
UserName: "two",
}
if name == "one" {
res = append(res, entryOne)
} else if name == "two" {
res = append(res, entryTwoA)
res = append(res, entryTwoB)
} else if name == "error" {
return res, fmt.Errorf("some error")
}
return res, nil
}

View File

@ -1 +1 @@
eyJSUzI1NiIsImtpZCI6ImxlZ2FjeS10b2tlbi1rZXkiLCJ0eXAiOiJKV1QifQ.eyJqdGkiOiIyNmRjYjg1YzMzZjU0OGM5ODk2YjI4MDEwN2IyOWM0NiIsInN1YiI6IjlhMTM0ODhmLWYzY2YtNDdhNi05OGYwLTRmZWQyMWY0MzUyMCIsInNjb3BlIjpbIm9wZW5pZCJdLCJjbGllbnRfaWQiOiJrdWJlcm5ldGVzIiwiY2lkIjoia3ViZXJuZXRlcyIsImF6cCI6Imt1YmVybmV0ZXMiLCJncmFudF90eXBlIjoicGFzc3dvcmQiLCJ1c2VyX2lkIjoiOWExMzQ4OGYtZjNjZi00N2E2LTk4ZjAtNGZlZDIxZjQzNTIwIiwib3JpZ2luIjoibGRhcCIsInVzZXJfbmFtZSI6InVzZXIwMSIsImVtYWlsIjoidXNlcjAxQHVzZXIuZnJvbS5sZGFwLmNmIiwiYXV0aF90aW1lIjoxNTExNDA1NDEwLCJyZXZfc2lnIjoiOGEwYmY5OWQiLCJpYXQiOjE1MTE0MDU0MTAsImV4cCI6MTUxMTQ0ODYxMCwiaXNzIjoiaHR0cHM6Ly9sb2NhbGhvc3Q6ODQ0My91YWEvb2F1dGgvdG9rZW4iLCJ6aWQiOiJ1YWEiLCJhdWQiOlsia3ViZXJuZXRlcyIsIm9wZW5pZCJdfQ.I7VBx_cQoYkotRJ8KdmESAf_xjzp-R44BRz9ngHPUnoqr4rSMin-Ful8wNzEnaYaG56_mrIPuLOb6vXGWW1svRU892GOK9WQRSiFp7O81V7f1bH6JXnIGvyBNl3JOkDB9d5wXn137h9vNKq3Z9TF3jD7oXR_OENS8paclW5EAjmjGvEVIhObMmHCLhsJshTWIoP8AwoP1m9iqak_-t0c99HWaf1AgVUtT2i9Jb63ndJGA6BkOSRH_YxXmM_qtXmk_0kRA5oLDR2UGA4TVXCYp1_8iwQYjvGBVxO24I5jJh_zDYs5YLTFeNzMTPEhAl_Te6NiE91gRXq6KiVk9tTfuA
eyJhbGciOiJSUzI1NiIsImtpZCI6ImxlZ2FjeS10b2tlbi1rZXkiLCJ0eXAiOiJKV1QifQ.eyJqdGkiOiIyNzlhNmI2MTRhMzM0NjVjYjYxZTM4ZmY5YTc4Y2YxZSIsInN1YiI6IjIwMTExNzE5LWNlM2EtNDRhYS05MmFjLTE3NmM0ZTM4MWY2NiIsInNjb3BlIjpbIm9wZW5pZCJdLCJjbGllbnRfaWQiOiJrdWJlcm5ldGVzIiwiY2lkIjoia3ViZXJuZXRlcyIsImF6cCI6Imt1YmVybmV0ZXMiLCJncmFudF90eXBlIjoicGFzc3dvcmQiLCJ1c2VyX2lkIjoiMjAxMTE3MTktY2UzYS00NGFhLTkyYWMtMTc2YzRlMzgxZjY2Iiwib3JpZ2luIjoibGRhcCIsInVzZXJfbmFtZSI6ImFkbWluIiwiZW1haWwiOiJhZG1pbkB1c2VyLmZyb20ubGRhcC5jZiIsImF1dGhfdGltZSI6MTUwNjg1MDQyNiwicmV2X3NpZyI6IjZkOWNlN2UwIiwiaWF0IjoxNTA2ODUwNDI2LCJleHAiOjE1MDY4OTM2MjYsImlzcyI6Imh0dHBzOi8vbG9jYWxob3N0Ojg0NDMvdWFhL29hdXRoL3Rva2VuIiwiemlkIjoidWFhIiwiYXVkIjpbImt1YmVybmV0ZXMiLCJvcGVuaWQiXX0.Ni2yJ7Gp6OnEdhcWyfGeCm1yG_rqQgf9BA0raJ37hdH-_ZRQ4HIELkWLv3gGPuWPV4HX6EKKerjJWXCPKihyiIIVT-W7VkwFMdDv9e4aA_h2eXjxHeUdjl0Cgw7gSAPSmm_QtkeLPuj15Ngd31yiuBoxy49_sjCyn3hjd8LP2ENEVtpk2vcCiQigW-YWbDaG64im1IP6jjRruwRdPF0Idjf4vuimFG-tiRdauDvnZc90W5fIJ3AUFW_ryGnSvc7E0rBZFYOgD5BB_3HLmWzB64-D3AVe9h5wQXOBorEaXLlSXfm16RQHFI_duSh3YOZUjHLuUYIRCuKaK5RPi0Fztg

View File

@ -0,0 +1 @@
{"resources":[],"startIndex":1,"itemsPerPage":100,"totalResults":0,"schemas":["urn:scim:schemas:core:1.0"]}

View File

@ -0,0 +1 @@
{"resources":[{"id":"6af888a1-92fa-4a30-82dd-4db28f2e15f0","externalId":"cn=one,dc=vmware,dc=com","meta":{"version":0,"created":"2017-12-20T22:54:34.493Z","lastModified":"2017-12-20T22:54:34.493Z"},"userName":"one","name":{},"emails":[{"value":"one@example.com","primary":false}],"groups":[{"value":"546a79d3-609b-49df-8111-56eee574fc99","display":"roles","type":"DIRECT"},{"value":"7e5eb7dc-8067-424a-a593-8620e9ef4962","display":"approvals.me","type":"DIRECT"},{"value":"9a946687-7be7-4a79-9742-462ae52e4833","display":"password.write","type":"DIRECT"},{"value":"f263a309-f855-405b-bcc4-e1c7453420c3","display":"uaa.offline_token","type":"DIRECT"},{"value":"80898c93-64a8-46cc-ba15-37fec9e2e56d","display":"uaa.user","type":"DIRECT"},{"value":"f8605b49-0dbc-47cf-a993-9691b7e313ab","display":"scim.userids","type":"DIRECT"},{"value":"83237f80-e709-40b9-8599-ab08b0f141a9","display":"oauth.approvals","type":"DIRECT"},{"value":"f685504d-c760-41cf-9c26-da8edddf643e","display":"user_attributes","type":"DIRECT"},{"value":"4243ba6a-001f-4052-8059-ada841a14e62","display":"cloud_controller.write","type":"DIRECT"},{"value":"36a94fb1-3bd2-4db6-8246-8a00256b080f","display":"profile","type":"DIRECT"},{"value":"0ead714c-02f1-403b-bd24-950089772f47","display":"scim.me","type":"DIRECT"},{"value":"a0944e04-1007-43ba-9745-e1ed62de21f5","display":"cloud_controller.read","type":"DIRECT"},{"value":"e9f2b839-9e2d-45b4-9179-5fce07cd013b","display":"cloud_controller_service_permissions.read","type":"DIRECT"},{"value":"2bb835d6-c62d-477b-a1df-780bb3ec560b","display":"openid","type":"DIRECT"}],"approvals":[],"active":true,"verified":true,"origin":"ldap","zoneId":"uaa","passwordLastModified":"2017-12-20T22:54:34.000Z","lastLogonTime":1513839274546,"schemas":["urn:scim:schemas:core:1.0"]}],"startIndex":1,"itemsPerPage":100,"totalResults":1,"schemas":["urn:scim:schemas:core:1.0"]}

View File

@ -53,23 +53,36 @@ func (t *tokenHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
http.Error(rw, "invalid client id/secret in header", http.StatusUnauthorized)
return
}
if gt := req.FormValue("grant_type"); gt != "password" {
gt := req.FormValue("grant_type")
if gt == "password" {
reqUsername := req.FormValue("username")
reqPasswd := req.FormValue("password")
if reqUsername == t.username && reqPasswd == t.password {
serveToken(rw)
} else {
http.Error(rw, fmt.Sprintf("invalid username/password %s/%s", reqUsername, reqPasswd), http.StatusUnauthorized)
}
} else if gt == "client_credentials" {
serveToken(rw)
} else {
http.Error(rw, fmt.Sprintf("invalid grant_type: %s", gt), http.StatusBadRequest)
return
}
reqUsername := req.FormValue("username")
reqPasswd := req.FormValue("password")
if reqUsername == t.username && reqPasswd == t.password {
token, err := ioutil.ReadFile(path.Join(currPath(), "./uaa-token.json"))
if err != nil {
panic(err)
}
_, err2 := rw.Write(token)
if err2 != nil {
panic(err2)
}
} else {
http.Error(rw, fmt.Sprintf("invalid username/password %s/%s", reqUsername, reqPasswd), http.StatusUnauthorized)
}
func serveToken(rw http.ResponseWriter) {
serveJSONFile(rw, "uaa-token.json")
}
func serveJSONFile(rw http.ResponseWriter, filename string) {
data, err := ioutil.ReadFile(path.Join(currPath(), filename))
if err != nil {
panic(err)
}
rw.Header().Add("Content-Type", "application/json")
_, err2 := rw.Write(data)
if err2 != nil {
panic(err2)
}
}
@ -78,27 +91,52 @@ type userInfoHandler struct {
}
func (u *userInfoHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
v := req.Header.Get("Authorization")
v := req.Header.Get("authorization")
prefix := v[0:7]
reqToken := v[7:]
if strings.ToLower(prefix) != "bearer " || reqToken != u.token {
http.Error(rw, "invalid token", http.StatusUnauthorized)
return
}
userInfo, err := ioutil.ReadFile(path.Join(currPath(), "./user-info.json"))
if err != nil {
panic(err)
serveJSONFile(rw, "./user-info.json")
}
type searchUserHandler struct {
token string
}
func (su *searchUserHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
v := req.Header.Get("authorization")
if v == "" {
v = req.Header.Get("Authorization")
}
_, err2 := rw.Write(userInfo)
if err2 != nil {
panic(err2)
prefix := v[0:7]
reqToken := v[7:]
if strings.ToLower(prefix) != "bearer " || reqToken != su.token {
http.Error(rw, "invalid token", http.StatusUnauthorized)
return
}
f := req.URL.Query().Get("filter")
elements := strings.Split(f, " ")
if len(elements) == 3 {
if elements[0] == "Username" && elements[1] == "eq" {
if elements[2] == "'one'" {
serveJSONFile(rw, "one-user.json")
return
}
serveJSONFile(rw, "no-user.json")
return
}
http.Error(rw, "invalid request", http.StatusBadRequest)
return
}
http.Error(rw, fmt.Sprintf("Invalid request, elements: %v", elements), http.StatusBadRequest)
}
// NewMockServer ...
func NewMockServer(cfg *MockServerConfig) *httptest.Server {
mux := http.NewServeMux()
mux.Handle("/uaa/oauth/token", &tokenHandler{
mux.Handle("/oauth/token", &tokenHandler{
cfg.ClientID,
cfg.ClientSecret,
cfg.Username,
@ -108,6 +146,7 @@ func NewMockServer(cfg *MockServerConfig) *httptest.Server {
if err != nil {
panic(err)
}
mux.Handle("/uaa/userinfo", &userInfoHandler{strings.TrimSpace(string(token))})
mux.Handle("/userinfo", &userInfoHandler{strings.TrimSpace(string(token))})
mux.Handle("/Users", &searchUserHandler{strings.TrimSpace(string(token))})
return httptest.NewTLSServer(mux)
}

View File

@ -1 +1 @@
{"access_token":"eyJhbGciOiJSUzI1NiIsImtpZCI6ImxlZ2FjeS10b2tlbi1rZXkiLCJ0eXAiOiJKV1QifQ.eyJqdGkiOiIyNzlhNmI2MTRhMzM0NjVjYjYxZTM4ZmY5YTc4Y2YxZSIsInN1YiI6IjIwMTExNzE5LWNlM2EtNDRhYS05MmFjLTE3NmM0ZTM4MWY2NiIsInNjb3BlIjpbIm9wZW5pZCJdLCJjbGllbnRfaWQiOiJrdWJlcm5ldGVzIiwiY2lkIjoia3ViZXJuZXRlcyIsImF6cCI6Imt1YmVybmV0ZXMiLCJncmFudF90eXBlIjoicGFzc3dvcmQiLCJ1c2VyX2lkIjoiMjAxMTE3MTktY2UzYS00NGFhLTkyYWMtMTc2YzRlMzgxZjY2Iiwib3JpZ2luIjoibGRhcCIsInVzZXJfbmFtZSI6ImFkbWluIiwiZW1haWwiOiJhZG1pbkB1c2VyLmZyb20ubGRhcC5jZiIsImF1dGhfdGltZSI6MTUwNjg1MDQyNiwicmV2X3NpZyI6IjZkOWNlN2UwIiwiaWF0IjoxNTA2ODUwNDI2LCJleHAiOjE1MDY4OTM2MjYsImlzcyI6Imh0dHBzOi8vbG9jYWxob3N0Ojg0NDMvdWFhL29hdXRoL3Rva2VuIiwiemlkIjoidWFhIiwiYXVkIjpbImt1YmVybmV0ZXMiLCJvcGVuaWQiXX0.Ni2yJ7Gp6OnEdhcWyfGeCm1yG_rqQgf9BA0raJ37hdH-_ZRQ4HIELkWLv3gGPuWPV4HX6EKKerjJWXCPKihyiIIVT-W7VkwFMdDv9e4aA_h2eXjxHeUdjl0Cgw7gSAPSmm_QtkeLPuj15Ngd31yiuBoxy49_sjCyn3hjd8LP2ENEVtpk2vcCiQigW-YWbDaG64im1IP6jjRruwRdPF0Idjf4vuimFG-tiRdauDvnZc90W5fIJ3AUFW_ryGnSvc7E0rBZFYOgD5BB_3HLmWzB64-D3AVe9h5wQXOBorEaXLlSXfm16RQHFI_duSh3YOZUjHLuUYIRCuKaK5RPi0Fztg","token_type":"bearer","refresh_token":"eyJhbGciOiJSUzI1NiIsImtpZCI6ImxlZ2FjeS10b2tlbi1rZXkiLCJ0eXAiOiJKV1QifQ.eyJqdGkiOiJkZTU0ZjJkMDlkODc0ZTliOTExZDk4YWQ1MTQzMjljZC1yIiwic3ViIjoiMjAxMTE3MTktY2UzYS00NGFhLTkyYWMtMTc2YzRlMzgxZjY2Iiwic2NvcGUiOlsib3BlbmlkIl0sImlhdCI6MTUwNjg1MDQyNiwiZXhwIjoxNTA5NDQyNDI2LCJjaWQiOiJrdWJlcm5ldGVzIiwiY2xpZW50X2lkIjoia3ViZXJuZXRlcyIsImlzcyI6Imh0dHBzOi8vbG9jYWxob3N0Ojg0NDMvdWFhL29hdXRoL3Rva2VuIiwiemlkIjoidWFhIiwiZ3JhbnRfdHlwZSI6InBhc3N3b3JkIiwidXNlcl9uYW1lIjoiYWRtaW4iLCJvcmlnaW4iOiJsZGFwIiwidXNlcl9pZCI6IjIwMTExNzE5LWNlM2EtNDRhYS05MmFjLTE3NmM0ZTM4MWY2NiIsInJldl9zaWciOiI2ZDljZTdlMCIsImF1ZCI6WyJrdWJlcm5ldGVzIiwib3BlbmlkIl19.oW4xK3QBjMtjUH_AWWyO6A0QwbIbTwrEFnc-hulj3QbLoULvC2V3L53rcKhT1gOtj8aaQTZFdBEQNGjBpzjFU8bpwxb0szyPMkc5PjXjcJGltL3MvmBf3P0TuUxJU9vP3FjrvwwueNAafLAyRIHy8yA3ZngzkL8KCI0ps51gCRU2oOe9hGDv2ZrsZ21u760hFGiRq5-7HWJu3VMqhMVRkUyPD_3j9AGZr6gf3o_7S9oJYwEDxPZaBhhVZI6QHeQNa07w7jCqTX97_fcpeTMbrBJiz_5yD9-kJZneI4xzAMIyNwAcbSJYrL7WZ2H01heGwWFEkrrv68YUJ762jB4WAw","expires_in":43199,"scope":"openid","jti":"279a6b614a33465cb61e38ff9a78cf1e"}
{"access_token":"eyJhbGciOiJSUzI1NiIsImtpZCI6ImxlZ2FjeS10b2tlbi1rZXkiLCJ0eXAiOiJKV1QifQ.eyJqdGkiOiIyNzlhNmI2MTRhMzM0NjVjYjYxZTM4ZmY5YTc4Y2YxZSIsInN1YiI6IjIwMTExNzE5LWNlM2EtNDRhYS05MmFjLTE3NmM0ZTM4MWY2NiIsInNjb3BlIjpbIm9wZW5pZCJdLCJjbGllbnRfaWQiOiJrdWJlcm5ldGVzIiwiY2lkIjoia3ViZXJuZXRlcyIsImF6cCI6Imt1YmVybmV0ZXMiLCJncmFudF90eXBlIjoicGFzc3dvcmQiLCJ1c2VyX2lkIjoiMjAxMTE3MTktY2UzYS00NGFhLTkyYWMtMTc2YzRlMzgxZjY2Iiwib3JpZ2luIjoibGRhcCIsInVzZXJfbmFtZSI6ImFkbWluIiwiZW1haWwiOiJhZG1pbkB1c2VyLmZyb20ubGRhcC5jZiIsImF1dGhfdGltZSI6MTUwNjg1MDQyNiwicmV2X3NpZyI6IjZkOWNlN2UwIiwiaWF0IjoxNTA2ODUwNDI2LCJleHAiOjE1MDY4OTM2MjYsImlzcyI6Imh0dHBzOi8vbG9jYWxob3N0Ojg0NDMvdWFhL29hdXRoL3Rva2VuIiwiemlkIjoidWFhIiwiYXVkIjpbImt1YmVybmV0ZXMiLCJvcGVuaWQiXX0.Ni2yJ7Gp6OnEdhcWyfGeCm1yG_rqQgf9BA0raJ37hdH-_ZRQ4HIELkWLv3gGPuWPV4HX6EKKerjJWXCPKihyiIIVT-W7VkwFMdDv9e4aA_h2eXjxHeUdjl0Cgw7gSAPSmm_QtkeLPuj15Ngd31yiuBoxy49_sjCyn3hjd8LP2ENEVtpk2vcCiQigW-YWbDaG64im1IP6jjRruwRdPF0Idjf4vuimFG-tiRdauDvnZc90W5fIJ3AUFW_ryGnSvc7E0rBZFYOgD5BB_3HLmWzB64-D3AVe9h5wQXOBorEaXLlSXfm16RQHFI_duSh3YOZUjHLuUYIRCuKaK5RPi0Fztg","token_type":"bearer","refresh_token":"eyJhbGciOiJSUzI1NiIsImtpZCI6ImxlZ2FjeS10b2tlbi1rZXkiLCJ0eXAiOiJKV1QifQ.eyJqdGkiOiJkZTU0ZjJkMDlkODc0ZTliOTExZDk4YWQ1MTQzMjljZC1yIiwic3ViIjoiMjAxMTE3MTktY2UzYS00NGFhLTkyYWMtMTc2YzRlMzgxZjY2Iiwic2NvcGUiOlsib3BlbmlkIl0sImlhdCI6MTUwNjg1MDQyNiwiZXhwIjoxNTA5NDQyNDI2LCJjaWQiOiJrdWJlcm5ldGVzIiwiY2xpZW50X2lkIjoia3ViZXJuZXRlcyIsImlzcyI6Imh0dHBzOi8vbG9jYWxob3N0Ojg0NDMvdWFhL29hdXRoL3Rva2VuIiwiemlkIjoidWFhIiwiZ3JhbnRfdHlwZSI6InBhc3N3b3JkIiwidXNlcl9uYW1lIjoiYWRtaW4iLCJvcmlnaW4iOiJsZGFwIiwidXNlcl9pZCI6IjIwMTExNzE5LWNlM2EtNDRhYS05MmFjLTE3NmM0ZTM4MWY2NiIsInJldl9zaWciOiI2ZDljZTdlMCIsImF1ZCI6WyJrdWJlcm5ldGVzIiwib3BlbmlkIl19.oW4xK3QBjMtjUH_AWWyO6A0QwbIbTwrEFnc-hulj3QbLoULvC2V3L53rcKhT1gOtj8aaQTZFdBEQNGjBpzjFU8bpwxb0szyPMkc5PjXjcJGltL3MvmBf3P0TuUxJU9vP3FjrvwwueNAafLAyRIHy8yA3ZngzkL8KCI0ps51gCRU2oOe9hGDv2ZrsZ21u760hFGiRq5-7HWJu3VMqhMVRkUyPD_3j9AGZr6gf3o_7S9oJYwEDxPZaBhhVZI6QHeQNa07w7jCqTX97_fcpeTMbrBJiz_5yD9-kJZneI4xzAMIyNwAcbSJYrL7WZ2H01heGwWFEkrrv68YUJ762jB4WAw","expires_in":43199,"jti":"279a6b614a33465cb61e38ff9a78cf1e", "scope":"clients.read password.write clients.secret uaa.resource openid clients.write uaa.admin scim.write scim.read client_id"}

View File

@ -17,12 +17,20 @@ package client
import (
"github.com/vmware/harbor/src/common/http"
"github.com/vmware/harbor/src/common/http/modifier/auth"
"github.com/vmware/harbor/src/jobservice/api"
)
// Replication holds information for submiting a replication job
type Replication struct {
PolicyID int64 `json:"policy_id"`
Repository string `json:"repository"`
Operation string `json:"operation"`
Tags []string `json:"tags"`
}
// Client defines the methods that a jobservice client should implement
type Client interface {
SubmitReplicationJob(*api.ReplicationReq) error
SubmitReplicationJob(*Replication) error
StopReplicationJobs(policyID int64) error
}
// DefaultClient provides a default implement for the interface Client
@ -50,7 +58,19 @@ func NewDefaultClient(endpoint string, cfg *Config) *DefaultClient {
}
// SubmitReplicationJob submits a replication job to the jobservice
func (d *DefaultClient) SubmitReplicationJob(replication *api.ReplicationReq) error {
func (d *DefaultClient) SubmitReplicationJob(replication *Replication) error {
url := d.endpoint + "/api/jobs/replication"
return d.client.Post(url, replication)
}
// StopReplicationJobs stop replication jobs of the policy specified by the policy ID
func (d *DefaultClient) StopReplicationJobs(policyID int64) error {
url := d.endpoint + "/api/jobs/replication/actions"
return d.client.Post(url, &struct {
PolicyID int64 `json:"policy_id"`
Action string `json:"action"`
}{
PolicyID: policyID,
Action: "stop",
})
}
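A hedged usage sketch of the reworked client API follows; the endpoint, policy ID, repository, and operation string are placeholders rather than values taken from this commit, and stdlib "log" is assumed.

// Illustrative sketch: client refers to this package (src/jobservice/client).
c := client.NewDefaultClient("http://jobservice:8080", &client.Config{}) // auth config omitted here
err := c.SubmitReplicationJob(&client.Replication{
	PolicyID:   1,
	Repository: "library/ubuntu",
	Operation:  "transfer", // placeholder; the replication controller supplies the real operation
	Tags:       []string{"14.04", "16.04"},
})
if err != nil {
	log.Printf("failed to submit replication job: %v", err)
}
// Ask the jobservice to stop the jobs created for the policy.
if err := c.StopReplicationJobs(1); err != nil {
	log.Printf("failed to stop jobs of policy 1: %v", err)
}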

View File

@ -22,18 +22,37 @@ import (
"github.com/stretchr/testify/assert"
"github.com/vmware/harbor/src/common/utils/test"
"github.com/vmware/harbor/src/jobservice/api"
)
var url string
func TestMain(m *testing.M) {
requestMapping := []*test.RequestHandlerMapping{
&test.RequestHandlerMapping{
Method: http.MethodPost,
Pattern: "/api/jobs/replication/actions",
Handler: func(w http.ResponseWriter, r *http.Request) {
action := &struct {
PolicyID int64 `json:"policy_id"`
Action string `json:"action"`
}{}
if err := json.NewDecoder(r.Body).Decode(action); err != nil {
w.WriteHeader(http.StatusInternalServerError)
return
}
if action.PolicyID != 1 {
w.WriteHeader(http.StatusNotFound)
return
}
},
},
&test.RequestHandlerMapping{
Method: http.MethodPost,
Pattern: "/api/jobs/replication",
Handler: func(w http.ResponseWriter, r *http.Request) {
replication := &api.ReplicationReq{}
replication := &Replication{}
if err := json.NewDecoder(r.Body).Decode(replication); err != nil {
w.WriteHeader(http.StatusInternalServerError)
}
@ -50,6 +69,18 @@ func TestMain(m *testing.M) {
func TestSubmitReplicationJob(t *testing.T) {
client := NewDefaultClient(url, &Config{})
err := client.SubmitReplicationJob(&api.ReplicationReq{})
err := client.SubmitReplicationJob(&Replication{})
assert.Nil(t, err)
}
func TestStopReplicationJobs(t *testing.T) {
client := NewDefaultClient(url, &Config{})
// 404
err := client.StopReplicationJobs(2)
assert.NotNil(t, err)
// 200
err = client.StopReplicationJobs(1)
assert.Nil(t, err)
}

View File

@ -20,7 +20,6 @@ import (
common_models "github.com/vmware/harbor/src/common/models"
"github.com/vmware/harbor/src/common/utils/log"
"github.com/vmware/harbor/src/jobservice/api"
"github.com/vmware/harbor/src/jobservice/client"
"github.com/vmware/harbor/src/replication"
"github.com/vmware/harbor/src/replication/models"
@ -63,7 +62,7 @@ type DefaultController struct {
//Keep controller as singleton instance
var (
GlobalController Controller = NewDefaultController(ControllerConfig{}) //Use default data
GlobalController Controller
)
//ControllerConfig includes related configurations required by the controller
@ -82,16 +81,17 @@ func NewDefaultController(cfg ControllerConfig) *DefaultController {
triggerManager: trigger.NewManager(cfg.CacheCapacity),
}
// TODO read from configuration
endpoint := "http://jobservice:8080"
ctl.replicator = replicator.NewDefaultReplicator(endpoint,
&client.Config{
Secret: config.UISecret(),
})
ctl.replicator = replicator.NewDefaultReplicator(config.GlobalJobserviceClient)
return ctl
}
// Init creates the GlobalController and inits it
func Init() error {
GlobalController = NewDefaultController(ControllerConfig{}) //Use default data
return GlobalController.Init()
}
//Init will initialize the controller and the sub components
func (ctl *DefaultController) Init() error {
if ctl.initialized {
@ -110,9 +110,7 @@ func (ctl *DefaultController) Init() error {
if policies != nil && len(policies) > 0 {
for _, policy := range policies {
if err := ctl.triggerManager.SetupTrigger(&policy); err != nil {
//TODO: Log error
fmt.Printf("Error: %s", err)
//TODO:Update the status of policy
log.Errorf("failed to setup trigger for policy %v: %v", policy, err)
}
}
}
@ -143,8 +141,6 @@ func (ctl *DefaultController) CreatePolicy(newPolicy models.ReplicationPolicy) (
//UpdatePolicy will update the policy with new content.
//Parameter updatedPolicy must have the ID of the updated policy.
func (ctl *DefaultController) UpdatePolicy(updatedPolicy models.ReplicationPolicy) error {
// TODO check pre-conditions
id := updatedPolicy.ID
originPolicy, err := ctl.policyManager.GetPolicy(id)
if err != nil {
@ -308,11 +304,11 @@ func replicate(replicator replicator.Replicator, policyID int64, candidates []mo
}
for repository, tags := range repositories {
replication := &api.ReplicationReq{
PolicyID: policyID,
Repo: repository,
Operation: operation,
TagList: tags,
replication := &client.Replication{
PolicyID: policyID,
Repository: repository,
Operation: operation,
Tags: tags,
}
log.Debugf("submiting replication job to jobservice: %v", replication)
if err := replicator.Replicate(replication); err != nil {

View File

@ -26,6 +26,7 @@ import (
)
func TestMain(m *testing.M) {
GlobalController = NewDefaultController(ControllerConfig{})
// set the policy manager used by GlobalController with a fake policy manager
controller := GlobalController.(*DefaultController)
controller.policyManager = &test.FakePolicyManager{}

View File

@ -15,13 +15,12 @@
package replicator
import (
"github.com/vmware/harbor/src/jobservice/api"
"github.com/vmware/harbor/src/jobservice/client"
)
// Replicator submits the replication work to the jobservice
type Replicator interface {
Replicate(*api.ReplicationReq) error
Replicate(*client.Replication) error
}
// DefaultReplicator provides a default implement for Replicator
@ -30,13 +29,13 @@ type DefaultReplicator struct {
}
// NewDefaultReplicator returns an instance of DefaultReplicator
func NewDefaultReplicator(endpoint string, cfg *client.Config) *DefaultReplicator {
func NewDefaultReplicator(client client.Client) *DefaultReplicator {
return &DefaultReplicator{
client: client.NewDefaultClient(endpoint, cfg),
client: client,
}
}
// Replicate ...
func (d *DefaultReplicator) Replicate(replication *api.ReplicationReq) error {
func (d *DefaultReplicator) Replicate(replication *client.Replication) error {
return d.client.SubmitReplicationJob(replication)
}

View File

@ -18,18 +18,20 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/vmware/harbor/src/jobservice/api"
"github.com/vmware/harbor/src/jobservice/client"
)
type fakeJobserviceClient struct{}
func (f *fakeJobserviceClient) SubmitReplicationJob(replication *api.ReplicationReq) error {
func (f *fakeJobserviceClient) SubmitReplicationJob(replication *client.Replication) error {
return nil
}
func (f *fakeJobserviceClient) StopReplicationJobs(policyID int64) error {
return nil
}
func TestReplicate(t *testing.T) {
replicator := NewDefaultReplicator("http://jobservice", &client.Config{})
replicator.client = &fakeJobserviceClient{}
assert.Nil(t, replicator.Replicate(&api.ReplicationReq{}))
replicator := NewDefaultReplicator(&fakeJobserviceClient{})
assert.Nil(t, replicator.Replicate(&client.Replication{}))
}

View File

@ -34,8 +34,6 @@ func NewRepositoryConvertor(registry registry.Adaptor) *RepositoryConvertor {
// Convert projects to repositories
func (r *RepositoryConvertor) Convert(items []models.FilterItem) []models.FilterItem {
// TODO get repositories from database where the push/deletion operations are recorded
// if support replicate deletion
result := []models.FilterItem{}
for _, item := range items {
// just put it to the result list if the item is not a project

View File

@ -17,6 +17,7 @@ package source
import (
"strings"
"github.com/vmware/harbor/src/common/utils"
"github.com/vmware/harbor/src/common/utils/log"
"github.com/vmware/harbor/src/replication"
"github.com/vmware/harbor/src/replication/models"
@ -71,6 +72,8 @@ func (r *RepositoryFilter) DoFilter(items []models.FilterItem) []models.FilterIt
log.Debugf("pattern is null, add %s to the repository filter result list", item.Value)
result = append(result, item)
} else {
// trim the project
_, repository = utils.ParseRepository(repository)
matched, err := match(r.pattern, repository)
if err != nil {
log.Errorf("failed to match pattern %s to value %s: %v", r.pattern, repository, err)

View File

@ -54,7 +54,7 @@ func TestDoFilterOfRepositoryFilter(t *testing.T) {
assert.Equal(t, 1, len(items))
// non-empty pattern
filter = NewRepositoryFilter("library/*", &registry.HarborAdaptor{})
filter = NewRepositoryFilter("*", &registry.HarborAdaptor{})
items = filter.DoFilter([]models.FilterItem{
models.FilterItem{
Kind: replication.FilterItemKindTag,
@ -64,7 +64,7 @@ func TestDoFilterOfRepositoryFilter(t *testing.T) {
assert.Equal(t, 1, len(items))
// non-empty pattern
filter = NewRepositoryFilter("library/*", &registry.HarborAdaptor{})
filter = NewRepositoryFilter("*", &registry.HarborAdaptor{})
items = filter.DoFilter([]models.FilterItem{
models.FilterItem{
Kind: replication.FilterItemKindTag,

View File

@ -52,6 +52,10 @@ var (
common.ProjectCreationRestriction,
common.TokenExpiration,
common.ScanAllPolicy,
common.UAAClientID,
common.UAAClientSecret,
common.UAAEndpoint,
common.UAAVerifyCert,
}
stringKeys = []string{
@ -68,6 +72,8 @@ var (
common.EmailFrom,
common.EmailIdentity,
common.ProjectCreationRestriction,
common.UAAClientID,
common.UAAEndpoint,
}
numKeys = []string{
@ -82,11 +88,13 @@ var (
common.EmailInsecure,
common.SelfRegistration,
common.LDAPVerifyCert,
common.UAAVerifyCert,
}
passwordKeys = []string{
common.EmailPassword,
common.LDAPSearchPwd,
common.UAAClientSecret,
}
)
@ -223,8 +231,8 @@ func validateCfg(c map[string]interface{}) (bool, error) {
}
if value, ok := strMap[common.AUTHMode]; ok {
if value != common.DBAuth && value != common.LDAPAuth {
return false, fmt.Errorf("invalid %s, shoud be %s or %s", common.AUTHMode, common.DBAuth, common.LDAPAuth)
if value != common.DBAuth && value != common.LDAPAuth && value != common.UAAAuth {
return false, fmt.Errorf("invalid %s, shoud be one of %s, %s, %s", common.AUTHMode, common.DBAuth, common.LDAPAuth, common.UAAAuth)
}
flag, err := authModeCanBeModified()
if err != nil {
@ -329,8 +337,12 @@ func convertForGet(cfg map[string]interface{}) (map[string]*value, error) {
if err != nil {
return nil, err
}
result[common.AUTHMode].Editable = flag
//All configuration of UAA will be un-editable for PKS 1.0 (1.4)
result[common.AUTHMode].Editable = flag && result[common.AUTHMode].Value.(string) != common.UAAAuth
result[common.UAAEndpoint].Editable = false
// result[common.UAAClientSecret].Editable = false
result[common.UAAVerifyCert].Editable = false
result[common.UAAClientID].Editable = false
return result, nil
}

View File

@ -134,7 +134,7 @@ func init() {
_ = updateInitPassword(1, "Harbor12345")
if err := core.GlobalController.Init(); err != nil {
if err := core.Init(); err != nil {
log.Fatalf("failed to initialize GlobalController: %v", err)
}

View File

@ -15,9 +15,7 @@
package api
import (
"fmt"
"net/http"
"strings"
"github.com/vmware/harbor/src/common/models"
ldapUtils "github.com/vmware/harbor/src/common/utils/ldap"
@ -30,7 +28,13 @@ type LdapAPI struct {
BaseController
}
const metaChars = "&|!=~*<>()"
const (
pingErrorMessage = "LDAP connection test failed!"
loadSystemErrorMessage = "Can't load system configuration!"
canNotOpenLdapSession = "Can't open LDAP session!"
searchLdapFailMessage = "LDAP search failed!"
importUserError = "Found internal error when importing LDAP user!"
)
// Prepare ...
func (l *LdapAPI) Prepare() {
@ -57,7 +61,7 @@ func (l *LdapAPI) Ping() {
ldapSession, err = ldapUtils.LoadSystemLdapConfig()
if err != nil {
log.Errorf("Can't load system configuration, error: %v", err)
l.RenderError(http.StatusInternalServerError, fmt.Sprintf("can't load system configuration: %v", err))
l.RenderError(http.StatusInternalServerError, pingErrorMessage)
return
}
err = ldapSession.ConnectionTest()
@ -68,7 +72,7 @@ func (l *LdapAPI) Ping() {
if err != nil {
log.Errorf("ldap connect fail, error: %v", err)
l.RenderError(http.StatusBadRequest, fmt.Sprintf("ldap connect fail: %v", err))
l.RenderError(http.StatusBadRequest, pingErrorMessage)
return
}
}
@ -84,7 +88,7 @@ func (l *LdapAPI) Search() {
ldapSession, err = ldapUtils.LoadSystemLdapConfig()
if err != nil {
log.Errorf("can't load system configuration, error: %v", err)
l.RenderError(http.StatusInternalServerError, fmt.Sprintf("can't load system configuration: %v", err))
l.RenderError(http.StatusInternalServerError, loadSystemErrorMessage)
return
}
} else {
@ -94,28 +98,18 @@ func (l *LdapAPI) Search() {
if err = ldapSession.Open(); err != nil {
log.Errorf("can't Open ldap session, error: %v", err)
l.RenderError(http.StatusInternalServerError, fmt.Sprintf("can't open ldap session: %v", err))
l.RenderError(http.StatusInternalServerError, canNotOpenLdapSession)
return
}
defer ldapSession.Close()
searchName := l.GetString("username")
if searchName != "" {
for _, c := range metaChars {
if strings.ContainsRune(searchName, c) {
log.Errorf("the search username contains meta char: %q", c)
l.RenderError(http.StatusBadRequest, fmt.Sprintf("the search username contains meta char: %q", c))
return
}
}
}
ldapUsers, err = ldapSession.SearchUser(searchName)
if err != nil {
log.Errorf("Ldap search fail, error: %v", err)
l.RenderError(http.StatusBadRequest, fmt.Sprintf("ldap search fail: %v", err))
l.RenderError(http.StatusBadRequest, searchLdapFailMessage)
return
}
@ -136,13 +130,13 @@ func (l *LdapAPI) ImportUser() {
if err != nil {
log.Errorf("Ldap import user fail, error: %v", err)
l.RenderError(http.StatusBadRequest, fmt.Sprintf("ldap import user fail: %v", err))
l.RenderError(http.StatusBadRequest, importUserError)
return
}
if len(ldapFailedImportUsers) > 0 {
log.Errorf("Import ldap user have internal error")
l.RenderError(http.StatusInternalServerError, fmt.Sprintf("import ldap user have internal error"))
l.RenderError(http.StatusInternalServerError, importUserError)
l.Data["json"] = ldapFailedImportUsers
l.ServeJSON()
return
@ -175,13 +169,6 @@ func importUsers(ldapConfs models.LdapConf, ldapImportUsers []string) ([]models.
continue
}
for _, c := range metaChars {
if strings.ContainsRune(u.UID, c) {
u.Error = "invaild_username"
break
}
}
if u.Error != "" {
failedImportUser = append(failedImportUser, u)
continue

View File

@ -0,0 +1,35 @@
// Copyright (c) 2017 VMware, Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package models
import (
"github.com/astaxie/beego/validation"
)
// StopJobsReq holds information needed to stop the jobs for a replication rule
type StopJobsReq struct {
PolicyID int64 `json:"policy_id"`
Status string `json:"status"`
}
// Valid ...
func (s *StopJobsReq) Valid(v *validation.Validation) {
if s.PolicyID <= 0 {
v.SetError("policy_id", "invalid value")
}
if s.Status != "stop" {
v.SetError("status", "invalid status, valid values: [stop]")
}
}
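For clarity, a test-style sketch of how Valid treats a well-formed and a malformed request follows; it is not part of the commit, and the demo function name and "fmt" import are assumptions.

// Illustrative sketch: uses the beego validation package already imported above.
func demoStopJobsValidation() {
	good := &validation.Validation{}
	req := &StopJobsReq{PolicyID: 1, Status: "stop"} // i.e. {"policy_id": 1, "status": "stop"}
	req.Valid(good)
	fmt.Println(good.HasErrors()) // false

	bad := &validation.Validation{}
	wrong := &StopJobsReq{PolicyID: 0, Status: "pause"}
	wrong.Valid(bad)
	fmt.Println(bad.HasErrors()) // true: both policy_id and status are rejected
}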

View File

@ -23,6 +23,9 @@ import (
"github.com/vmware/harbor/src/common/dao"
"github.com/vmware/harbor/src/common/models"
"github.com/vmware/harbor/src/common/utils/log"
"github.com/vmware/harbor/src/replication/core"
api_models "github.com/vmware/harbor/src/ui/api/models"
"github.com/vmware/harbor/src/ui/config"
"github.com/vmware/harbor/src/ui/utils"
)
@ -40,7 +43,7 @@ func (ra *RepJobAPI) Prepare() {
return
}
if !ra.SecurityCtx.IsSysAdmin() {
if !(ra.Ctx.Request.Method == http.MethodGet || ra.SecurityCtx.IsSysAdmin()) {
ra.HandleForbidden(ra.SecurityCtx.GetUsername())
return
}
@ -63,16 +66,21 @@ func (ra *RepJobAPI) List() {
ra.CustomAbort(http.StatusBadRequest, "invalid policy_id")
}
policy, err := dao.GetRepPolicy(policyID)
policy, err := core.GlobalController.GetPolicy(policyID)
if err != nil {
log.Errorf("failed to get policy %d: %v", policyID, err)
ra.CustomAbort(http.StatusInternalServerError, "")
}
if policy == nil {
if policy.ID == 0 {
ra.CustomAbort(http.StatusNotFound, fmt.Sprintf("policy %d not found", policyID))
}
if !ra.SecurityCtx.HasAllPerm(policy.ProjectIDs[0]) {
ra.HandleForbidden(ra.SecurityCtx.GetUsername())
return
}
repository := ra.GetString("repository")
status := ra.GetString("status")
@ -145,12 +153,56 @@ func (ra *RepJobAPI) GetLog() {
if ra.jobID == 0 {
ra.CustomAbort(http.StatusBadRequest, "id is nil")
}
job, err := dao.GetRepJob(ra.jobID)
if err != nil {
ra.HandleInternalServerError(fmt.Sprintf("failed to get replication job %d: %v", ra.jobID, err))
return
}
if job == nil {
ra.HandleNotFound(fmt.Sprintf("replication job %d not found", ra.jobID))
return
}
policy, err := core.GlobalController.GetPolicy(job.PolicyID)
if err != nil {
ra.HandleInternalServerError(fmt.Sprintf("failed to get policy %d: %v", job.PolicyID, err))
return
}
if !ra.SecurityCtx.HasAllPerm(policy.ProjectIDs[0]) {
ra.HandleForbidden(ra.SecurityCtx.GetUsername())
return
}
url := buildJobLogURL(strconv.FormatInt(ra.jobID, 10), ReplicationJobType)
err := utils.RequestAsUI(http.MethodGet, url, nil, utils.NewJobLogRespHandler(&ra.BaseAPI))
err = utils.RequestAsUI(http.MethodGet, url, nil, utils.NewJobLogRespHandler(&ra.BaseAPI))
if err != nil {
ra.RenderError(http.StatusInternalServerError, err.Error())
return
}
}
// StopJobs stop replication jobs for the policy
func (ra *RepJobAPI) StopJobs() {
req := &api_models.StopJobsReq{}
ra.DecodeJSONReqAndValidate(req)
policy, err := core.GlobalController.GetPolicy(req.PolicyID)
if err != nil {
ra.HandleInternalServerError(fmt.Sprintf("failed to get policy %d: %v", req.PolicyID, err))
return
}
if policy.ID == 0 {
ra.CustomAbort(http.StatusNotFound, fmt.Sprintf("policy %d not found", req.PolicyID))
}
if err = config.GlobalJobserviceClient.StopReplicationJobs(req.PolicyID); err != nil {
ra.HandleInternalServerError(fmt.Sprintf("failed to stop replication jobs of policy %d: %v", req.PolicyID, err))
return
}
}
//TODO:add Post handler to call job service API to submit jobs by policy

View File

@ -42,7 +42,7 @@ func (pa *RepPolicyAPI) Prepare() {
return
}
if !pa.SecurityCtx.IsSysAdmin() {
if !(pa.Ctx.Request.Method == http.MethodGet || pa.SecurityCtx.IsSysAdmin()) {
pa.HandleForbidden(pa.SecurityCtx.GetUsername())
return
}
@ -61,6 +61,11 @@ func (pa *RepPolicyAPI) Get() {
pa.CustomAbort(http.StatusNotFound, http.StatusText(http.StatusNotFound))
}
if !pa.SecurityCtx.HasAllPerm(policy.ProjectIDs[0]) {
pa.HandleForbidden(pa.SecurityCtx.GetUsername())
return
}
ply, err := convertFromRepPolicy(pa.ProjectMgr, policy)
if err != nil {
pa.ParseAndHandleError(fmt.Sprintf("failed to convert from replication policy"), err)
@ -94,6 +99,9 @@ func (pa *RepPolicyAPI) List() {
}
for _, policy := range policies {
if !pa.SecurityCtx.HasAllPerm(policy.ProjectIDs[0]) {
continue
}
ply, err := convertFromRepPolicy(pa.ProjectMgr, policy)
if err != nil {
pa.ParseAndHandleError(fmt.Sprintf("failed to convert from replication policy"), err)

View File

@ -21,6 +21,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/vmware/harbor/src/common/dao"
"github.com/vmware/harbor/src/common/models"
"github.com/vmware/harbor/src/replication"
rep_models "github.com/vmware/harbor/src/replication/models"
@ -265,15 +266,28 @@ func TestRepPolicyAPIPost(t *testing.T) {
}
func TestRepPolicyAPIGet(t *testing.T) {
// 404
runCodeCheckingCases(t, &codeCheckingCase{
request: &testingRequest{
method: http.MethodGet,
url: fmt.Sprintf("%s/%d", repPolicyAPIBasePath, 10000),
credential: sysAdmin,
cases := []*codeCheckingCase{
// 404
&codeCheckingCase{
request: &testingRequest{
method: http.MethodGet,
url: fmt.Sprintf("%s/%d", repPolicyAPIBasePath, 10000),
credential: sysAdmin,
},
code: http.StatusNotFound,
},
code: http.StatusNotFound,
})
// 401
&codeCheckingCase{
request: &testingRequest{
method: http.MethodGet,
url: fmt.Sprintf("%s/%d", repPolicyAPIBasePath, policyID),
},
code: http.StatusUnauthorized,
},
}
runCodeCheckingCases(t, cases...)
// 200
policy := &api_models.ReplicationPolicy{}
@ -290,6 +304,39 @@ func TestRepPolicyAPIGet(t *testing.T) {
}
func TestRepPolicyAPIList(t *testing.T) {
projectAdmin := models.User{
Username: "project_admin",
Password: "ProjectAdmin",
Email: "project_admin@test.com",
}
projectDev := models.User{
Username: "project_dev",
Password: "ProjectDev",
Email: "project_dev@test.com",
}
proAdminID, err := dao.Register(projectAdmin)
if err != nil {
panic(err)
}
defer dao.DeleteUser(int(proAdminID))
if err = dao.AddProjectMember(1, int(proAdminID), models.PROJECTADMIN); err != nil {
panic(err)
}
defer dao.DeleteProjectMember(1, int(proAdminID))
proDevID, err := dao.Register(projectDev)
if err != nil {
panic(err)
}
defer dao.DeleteUser(int(proDevID))
if err = dao.AddProjectMember(1, int(proDevID), models.DEVELOPER); err != nil {
panic(err)
}
defer dao.DeleteProjectMember(1, int(proDevID))
// 400: invalid project ID
runCodeCheckingCases(t, &codeCheckingCase{
request: &testingRequest{
@ -305,7 +352,7 @@ func TestRepPolicyAPIList(t *testing.T) {
code: http.StatusBadRequest,
})
// 200
// 200 system admin
policies := []*api_models.ReplicationPolicy{}
resp, err := handleAndParse(
&testingRequest{
@ -326,6 +373,52 @@ func TestRepPolicyAPIList(t *testing.T) {
assert.Equal(t, policyID, policies[0].ID)
assert.Equal(t, policyName, policies[0].Name)
// 200 project admin
policies = []*api_models.ReplicationPolicy{}
resp, err = handleAndParse(
&testingRequest{
method: http.MethodGet,
url: repPolicyAPIBasePath,
queryStruct: struct {
ProjectID int64 `url:"project_id"`
Name string `url:"name"`
}{
ProjectID: projectID,
Name: policyName,
},
credential: &usrInfo{
Name: projectAdmin.Username,
Passwd: projectAdmin.Password,
},
}, &policies)
require.Nil(t, err)
assert.Equal(t, http.StatusOK, resp.Code)
require.Equal(t, 1, len(policies))
assert.Equal(t, policyID, policies[0].ID)
assert.Equal(t, policyName, policies[0].Name)
// 200 project developer
policies = []*api_models.ReplicationPolicy{}
resp, err = handleAndParse(
&testingRequest{
method: http.MethodGet,
url: repPolicyAPIBasePath,
queryStruct: struct {
ProjectID int64 `url:"project_id"`
Name string `url:"name"`
}{
ProjectID: projectID,
Name: policyName,
},
credential: &usrInfo{
Name: projectDev.Username,
Passwd: projectDev.Password,
},
}, &policies)
require.Nil(t, err)
assert.Equal(t, http.StatusOK, resp.Code)
require.Equal(t, 0, len(policies))
// 200
policies = []*api_models.ReplicationPolicy{}
resp, err = handleAndParse(

View File

@ -20,6 +20,7 @@ import (
"io/ioutil"
"net/http"
"strconv"
"strings"
"time"
"github.com/docker/distribution/manifest/schema1"
@ -66,6 +67,11 @@ type tagDetail struct {
DockerVersion string `json:"docker_version"`
Author string `json:"author"`
Created time.Time `json:"created"`
Config *cfg `json:"config"`
}
type cfg struct {
Labels map[string]string `json:"labels"`
}
type tagResp struct {
@ -473,9 +479,28 @@ func getTagDetail(client *registry.Repository, tag string) (*tagDetail, error) {
return detail, err
}
populateAuthor(detail)
return detail, nil
}
func populateAuthor(detail *tagDetail) {
// has author info already
if len(detail.Author) > 0 {
return
}
// try to set author with the value of label "maintainer"
if detail.Config != nil {
for k, v := range detail.Config.Labels {
if strings.ToLower(k) == "maintainer" {
detail.Author = v
return
}
}
}
}
// GetManifests returns the manifest of a tag
func (ra *RepositoryAPI) GetManifests() {
repoName := ra.GetString(":splat")
@ -638,7 +663,7 @@ func (ra *RepositoryAPI) Put() {
}
project, _ := utils.ParseRepository(name)
if !ra.SecurityCtx.HasAllPerm(project) {
if !ra.SecurityCtx.HasWritePerm(project) {
ra.HandleForbidden(ra.SecurityCtx.GetUsername())
return
}

View File

@ -199,3 +199,27 @@ func TestGetReposTop(t *testing.T) {
fmt.Printf("\n")
}
func TestPopulateAuthor(t *testing.T) {
author := "author"
detail := &tagDetail{
Author: author,
}
populateAuthor(detail)
assert.Equal(t, author, detail.Author)
detail = &tagDetail{}
populateAuthor(detail)
assert.Equal(t, "", detail.Author)
maintainer := "maintainer"
detail = &tagDetail{
Config: &cfg{
Labels: map[string]string{
"Maintainer": maintainer,
},
},
}
populateAuthor(detail)
assert.Equal(t, maintainer, detail.Author)
}

View File

@ -46,10 +46,11 @@ var registry = make(map[string]AuthenticateHelper)
// Register add different authenticators to registry map.
func Register(name string, h AuthenticateHelper) {
if _, dup := registry[name]; dup {
log.Infof("authenticator: %s has been registered", name)
log.Infof("authenticator: %s has been registered,skip", name)
return
}
registry[name] = h
log.Debugf("Registered authencation helper for auth mode: %s", name)
}
// Login authenticates user credentials based on setting.

View File

@ -28,8 +28,6 @@ import (
// Auth implements AuthenticateHelper interface to authenticate against LDAP
type Auth struct{}
const metaChars = "&|!=~*<>()"
// Authenticate checks user's credential against LDAP based on basedn template and LDAP URL,
// if the check is successful a dummy record will be inserted into DB, such that this user can
// be associated to other entities in the system.
@ -40,11 +38,6 @@ func (l *Auth) Authenticate(m models.AuthModel) (*models.User, error) {
log.Debugf("LDAP authentication failed for empty user id.")
return nil, nil
}
for _, c := range metaChars {
if strings.ContainsRune(p, c) {
return nil, fmt.Errorf("the principal contains meta char: %q", c)
}
}
ldapSession, err := ldapUtils.LoadSystemLdapConfig()

View File

@ -15,79 +15,118 @@
package uaa
import (
"fmt"
"strings"
"sync"
"github.com/vmware/harbor/src/common"
"github.com/vmware/harbor/src/common/dao"
"github.com/vmware/harbor/src/common/models"
"github.com/vmware/harbor/src/common/utils/uaa"
"github.com/vmware/harbor/src/ui/auth"
"github.com/vmware/harbor/src/ui/config"
)
var lock = &sync.Mutex{}
var client uaa.Client
//GetClient returns the client instance, if the client is not created it creates one.
func GetClient() (uaa.Client, error) {
lock.Lock()
defer lock.Unlock()
if client != nil {
return client, nil
}
//CreateClient creates a UAA Client instance based on system configuration.
func CreateClient() (uaa.Client, error) {
UAASettings, err := config.UAASettings()
if err != nil {
return nil, err
}
cfg := &uaa.ClientConfig{
ClientID: UAASettings.ClientID,
ClientSecret: UAASettings.ClientSecret,
Endpoint: UAASettings.Endpoint,
CARootPath: UAASettings.CARootPath,
ClientID: UAASettings.ClientID,
ClientSecret: UAASettings.ClientSecret,
Endpoint: UAASettings.Endpoint,
SkipTLSVerify: !UAASettings.VerifyCert,
}
client, err = uaa.NewDefaultClient(cfg)
return client, err
return uaa.NewDefaultClient(cfg)
}
func doAuth(username, password string, client uaa.Client) (*models.User, error) {
t, err := client.PasswordAuth(username, password)
// Auth is the implementation of AuthenticateHelper to access uaa for authentication.
type Auth struct {
sync.Mutex
client uaa.Client
}
//Authenticate ...
func (u *Auth) Authenticate(m models.AuthModel) (*models.User, error) {
if err := u.ensureClient(); err != nil {
return nil, err
}
t, err := u.client.PasswordAuth(m.Principal, m.Password)
if t != nil && err == nil {
//TODO: See if it's possible to get more information from token.
u := &models.User{
Username: username,
Password: "1234567ab",
Email: username + "@placeholder.com",
Realname: username,
}
err = dao.OnBoardUser(u)
if err == nil {
return u, nil
user := &models.User{
Username: m.Principal,
}
err = u.OnBoardUser(user)
return user, err
}
return nil, err
}
// Auth is the implementation of AuthenticateHelper to access uaa for authentication.
type Auth struct{}
// OnBoardUser will check if a user exists in user table, if not insert the user and
// put the id in the pointer of user model, if it does exist, return the user's profile.
func (u *Auth) OnBoardUser(user *models.User) error {
user.Username = strings.TrimSpace(user.Username)
if len(user.Username) == 0 {
return fmt.Errorf("The Username is empty")
}
if len(user.Password) == 0 {
user.Password = "1234567ab"
}
if len(user.Realname) == 0 {
user.Realname = user.Username
}
if len(user.Email) == 0 {
//TODO: handle the case when user.Username itself is an email address.
user.Email = user.Username + "@uaa.placeholder"
}
user.Comment = "From UAA"
return dao.OnBoardUser(user)
}
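// Illustrative sketch, not part of this change: onboarding a UAA user with only a
// username applies the defaults above (assuming dao.OnBoardUser leaves these
// fields untouched), e.g.
//
//   u := &models.User{Username: "jdoe"}
//   _ = (&Auth{}).OnBoardUser(u)
//   // u.Password == "1234567ab", u.Realname == "jdoe", u.Email == "jdoe@uaa.placeholder"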
//Authenticate ...
func (u *Auth) Authenticate(m models.AuthModel) (*models.User, error) {
client, err := GetClient()
// SearchUser searches for the user on the UAA server and transforms the result into Harbor's user model
func (u *Auth) SearchUser(username string) (*models.User, error) {
if err := u.ensureClient(); err != nil {
return nil, err
}
l, err := u.client.SearchUser(username)
if err != nil {
return nil, err
}
return doAuth(m.Principal, m.Password, client)
if len(l) == 0 {
return nil, nil
}
if len(l) > 1 {
return nil, fmt.Errorf("Multiple entries found for username: %s", username)
}
e := l[0]
email := ""
if len(e.Emails) > 0 {
email = e.Emails[0].Value
}
return &models.User{
Username: username,
Email: email,
}, nil
}
// OnBoardUser will check if a user exists in user table, if not insert the user and
// put the id in the pointer of user model, if it does exist, return the user's profile.
// func (u *Auth) OnBoardUser(user *models.User) error {
// panic("not implemented")
// }
// // SearchUser - search user on uaa server
// func (u *Auth) SearchUser(username string) (*models.User, error) {
// panic("not implemented")
// }
// func init() {
// auth.Register(auth.UAAAuth, &Auth{})
// }
func (u *Auth) ensureClient() error {
if u.client != nil {
return nil
}
u.Lock()
defer u.Unlock()
if u.client == nil {
c, err := CreateClient()
if err != nil {
return err
}
u.client = c
}
return nil
}
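// ensureClient lazily creates the UAA client on first use and caches it on the
// Auth struct, with the embedded sync.Mutex serializing creation. Illustrative
// note, not part of this change: tests can bypass CreateClient by injecting a
// fake client directly, e.g.
//
//   a := &Auth{client: fakeClient}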
func init() {
auth.Register(common.UAAAuth, &Auth{})
}

View File

@ -17,45 +17,159 @@ package uaa
import (
"github.com/stretchr/testify/assert"
"github.com/vmware/harbor/src/common/dao"
"github.com/vmware/harbor/src/common/models"
"github.com/vmware/harbor/src/common/utils/log"
utilstest "github.com/vmware/harbor/src/common/utils/test"
"github.com/vmware/harbor/src/common/utils/uaa"
"github.com/vmware/harbor/src/ui/config"
"os"
"strconv"
"testing"
)
func TestGetClient(t *testing.T) {
assert := assert.New(t)
func TestMain(m *testing.M) {
dbHost := os.Getenv("MYSQL_HOST")
if len(dbHost) == 0 {
log.Fatalf("environment variable MYSQL_HOST is not set")
}
dbUser := os.Getenv("MYSQL_USR")
if len(dbUser) == 0 {
log.Fatalf("environment variable MYSQL_USR is not set")
}
dbPortStr := os.Getenv("MYSQL_PORT")
if len(dbPortStr) == 0 {
log.Fatalf("environment variable MYSQL_PORT is not set")
}
dbPort, err := strconv.Atoi(dbPortStr)
if err != nil {
log.Fatalf("invalid MYSQL_PORT: %v", err)
}
dbPassword := os.Getenv("MYSQL_PWD")
dbDatabase := os.Getenv("MYSQL_DATABASE")
if len(dbDatabase) == 0 {
log.Fatalf("environment variable MYSQL_DATABASE is not set")
}
database := &models.Database{
Type: "mysql",
MySQL: &models.MySQL{
Host: dbHost,
Port: dbPort,
Username: dbUser,
Password: dbPassword,
Database: dbDatabase,
},
}
dao.InitDatabase(database)
server, err := utilstest.NewAdminserver(nil)
if err != nil {
t.Fatalf("failed to create a mock admin server: %v", err)
panic(err)
}
defer server.Close()
if err := os.Setenv("ADMINSERVER_URL", server.URL); err != nil {
t.Fatalf("failed to set env %s: %v", "ADMINSERVER_URL", err)
panic(err)
}
err = config.Init()
if err != nil {
t.Fatalf("failed to init config: %v", err)
panic(err)
}
c, err := GetClient()
err = dao.ClearTable("project_member")
if err != nil {
panic(err)
}
err = dao.ClearTable("project_metadata")
if err != nil {
panic(err)
}
err = dao.ClearTable("access_log")
if err != nil {
panic(err)
}
err = dao.ClearTable("project")
if err != nil {
panic(err)
}
err = dao.ClearTable("user")
if err != nil {
panic(err)
}
rc := m.Run()
os.Exit(rc)
}
func TestCreateClient(t *testing.T) {
assert := assert.New(t)
c, err := CreateClient()
assert.Nil(err)
assert.NotNil(c)
}
func TestDoAuth(t *testing.T) {
func TestAuthenticate(t *testing.T) {
assert := assert.New(t)
client := &uaa.FakeClient{
Username: "user1",
Password: "password1",
}
dao.PrepareTestForMySQL()
u1, err1 := doAuth("user1", "password1", client)
auth := Auth{client: client}
m1 := models.AuthModel{
Principal: "user1",
Password: "password1",
}
u1, err1 := auth.Authenticate(m1)
assert.Nil(err1)
assert.True(u1.UserID > 0)
u2, err2 := doAuth("wrong", "wrong", client)
assert.NotNil(u1)
m2 := models.AuthModel{
Principal: "wrong",
Password: "wrong",
}
u2, err2 := auth.Authenticate(m2)
assert.NotNil(err2)
assert.Nil(u2)
err3 := dao.ClearTable(models.UserTable)
assert.Nil(err3)
}
func TestOnBoardUser(t *testing.T) {
assert := assert.New(t)
auth := Auth{}
um1 := &models.User{
Username: " ",
}
err1 := auth.OnBoardUser(um1)
assert.NotNil(err1)
um2 := &models.User{
Username: "test ",
}
user2, _ := dao.GetUser(models.User{Username: "test"})
assert.Nil(user2)
err2 := auth.OnBoardUser(um2)
assert.Nil(err2)
user, _ := dao.GetUser(models.User{Username: "test"})
assert.Equal("test", user.Realname)
assert.Equal("test", user.Username)
assert.Equal("test@uaa.placeholder", user.Email)
}
func TestSearchUser(t *testing.T) {
assert := assert.New(t)
client := &uaa.FakeClient{
Username: "user1",
Password: "password1",
}
auth := Auth{client: client}
_, err0 := auth.SearchUser("error")
assert.NotNil(err0)
u1, err1 := auth.SearchUser("one")
assert.Nil(err1)
assert.Equal("one@email.com", u1.Email)
_, err2 := auth.SearchUser("two")
assert.NotNil(err2)
user3, err3 := auth.SearchUser("none")
assert.Nil(user3)
assert.Nil(err3)
}

View File

@ -28,6 +28,7 @@ import (
"github.com/vmware/harbor/src/common/models"
"github.com/vmware/harbor/src/common/secret"
"github.com/vmware/harbor/src/common/utils/log"
jobservice_client "github.com/vmware/harbor/src/jobservice/client"
"github.com/vmware/harbor/src/ui/promgr"
"github.com/vmware/harbor/src/ui/promgr/pmsdriver"
"github.com/vmware/harbor/src/ui/promgr/pmsdriver/admiral"
@ -54,6 +55,8 @@ var (
AdmiralClient *http.Client
// TokenReader is used in integration mode to read token
TokenReader admiral.TokenReader
// GlobalJobserviceClient is a global client for jobservice
GlobalJobserviceClient jobservice_client.Client
)
// Init configurations
@ -92,6 +95,11 @@ func InitByURL(adminServerURL string) error {
// init project manager based on deploy mode
initProjectManager()
GlobalJobserviceClient = jobservice_client.NewDefaultClient(InternalJobServiceURL(),
&jobservice_client.Config{
Secret: UISecret(),
})
return nil
}
@ -260,6 +268,10 @@ func InternalJobServiceURL() string {
return "http://jobservice"
}
if cfg[common.JobServiceURL] == nil {
return "http://jobservice"
}
return strings.TrimSuffix(cfg[common.JobServiceURL].(string), "/")
}
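// Illustrative sketch, not part of this change: with the JobServiceURL setting
// absent, the helper falls back to "http://jobservice"; when it is set to
// "http://myjob:8888/", the trailing slash is trimmed, e.g.
//
//   InternalJobServiceURL() // "http://myjob:8888"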
@ -380,15 +392,21 @@ func ClairEndpoint() string {
return common.DefaultClairEndpoint
}
// ClairDBPassword returns the password for accessing Clair's DB.
func ClairDBPassword() (string, error) {
// ClairDB returns the Clair DB info
func ClairDB() (*models.PostGreSQL, error) {
cfg, err := mg.Get()
if err != nil {
return "", err
log.Errorf("Failed to get configuration of Clair DB, Error detail %v", err)
return nil, err
}
return cfg[common.ClairDBPassword].(string), nil
clairDB := &models.PostGreSQL{}
clairDB.Host = cfg[common.ClairDBHost].(string)
clairDB.Port = int(cfg[common.ClairDBPort].(float64))
clairDB.Username = cfg[common.ClairDBUsername].(string)
clairDB.Password = cfg[common.ClairDBPassword].(string)
clairDB.Database = cfg[common.ClairDB].(string)
return clairDB, nil
}
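// Illustrative sketch, not part of this change: a caller in another package could
// assemble a Postgres DSN from the returned struct (the DSN format here is only an
// example; actual connection handling lives elsewhere), e.g.
//
//   db, _ := config.ClairDB()
//   dsn := fmt.Sprintf("postgres://%s:%s@%s:%d/%s?sslmode=disable",
//       db.Username, db.Password, db.Host, db.Port, db.Database)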
// AdmiralEndpoint returns the URL of admiral, if Harbor is not deployed with admiral it should return an empty string.
func AdmiralEndpoint() string {
cfg, err := mg.Get()
@ -442,9 +460,7 @@ func UAASettings() (*models.UAASettings, error) {
Endpoint: cfg[common.UAAEndpoint].(string),
ClientID: cfg[common.UAAClientID].(string),
ClientSecret: cfg[common.UAAClientSecret].(string),
}
if len(os.Getenv("UAA_CA_ROOT")) != 0 {
us.CARootPath = os.Getenv("UAA_CA_ROOT")
VerifyCert: cfg[common.UAAVerifyCert].(bool),
}
return us, nil
}

View File

@ -19,6 +19,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/vmware/harbor/src/common/utils/test"
"github.com/vmware/harbor/src/common"
)
// test functions under package ui/config
@ -117,6 +118,18 @@ func TestConfig(t *testing.T) {
if _, err := Database(); err != nil {
t.Fatalf("failed to get database: %v", err)
}
clairDB, err := ClairDB()
if err != nil {
t.Fatalf("failed to get Clair DB: %v", err)
}
adminServerDefaultConfig := test.GetDefaultConfigMap()
assert.Equal(adminServerDefaultConfig[common.ClairDB], clairDB.Database)
assert.Equal(adminServerDefaultConfig[common.ClairDBUsername], clairDB.Username)
assert.Equal(adminServerDefaultConfig[common.ClairDBPassword], clairDB.Password)
assert.Equal(adminServerDefaultConfig[common.ClairDBHost], clairDB.Host)
assert.Equal(adminServerDefaultConfig[common.ClairDBPort], clairDB.Port)
if InternalNotaryEndpoint() != "http://notary-server:4443" {
t.Errorf("Unexpected notary endpoint: %s", InternalNotaryEndpoint())
}
@ -163,7 +176,7 @@ func TestConfig(t *testing.T) {
t.Fatalf("failed to get UAA setting, error: %v", err)
}
if us.ClientID != "testid" || us.ClientSecret != "testsecret" || us.Endpoint != "10.192.168.5" {
if us.ClientID != "testid" || us.ClientSecret != "testsecret" || us.Endpoint != "10.192.168.5" || us.VerifyCert {
t.Errorf("Unexpected UAA setting: %+v", *us)
}
assert.Equal("http://myjob:8888", InternalJobServiceURL())

Some files were not shown because too many files have changed in this diff.