Merge remote-tracking branch 'upstream/db-role-refactor' into db_role_refactor

Wenkai Yin 2016-03-29 12:12:09 +08:00
commit f10769d5ba
2 changed files with 52 additions and 54 deletions


@@ -10,27 +10,30 @@ create table access (
 primary key (access_id)
 );
-insert into access values
-( null, 'A', 'All access for the system'),
-( null, 'M', 'Management access for project'),
-( null, 'R', 'Read access for project'),
-( null, 'W', 'Write access for project'),
-( null, 'D', 'Delete access for project'),
-( null, 'S', 'Search access for project');
+insert into access (access_code, comment) values
+('M', 'Management access for project'),
+('R', 'Read access for project'),
+('W', 'Write access for project'),
+('D', 'Delete access for project'),
+('S', 'Search access for project');
 create table role (
 role_id int NOT NULL AUTO_INCREMENT,
+role_mask int DEFAULT 0 NOT NULL,
 role_code varchar(20),
 name varchar (20),
 primary key (role_id)
 );
-insert into role values
-( null, 'AMDRWS', 'sysAdmin'),
-( null, 'MDRWS', 'projectAdmin'),
-( null, 'RWS', 'developer'),
-( null, 'RS', 'guest');
+/*
+role mask is used for future enhancement when a project member can have multi-roles
+currently set to 0
+*/
+insert into role (role_code, name) values
+('MDRWS', 'projectAdmin'),
+('RWS', 'developer'),
+('RS', 'guest');
 create table user (
@@ -43,20 +46,24 @@ create table user (
 deleted tinyint (1) DEFAULT 0 NOT NULL,
 reset_uuid varchar(40) DEFAULT NULL,
 salt varchar(40) DEFAULT NULL,
+sysadmin_flag tinyint (1),
+creation_time timestamp,
+update_time timestamp,
 primary key (user_id),
 UNIQUE (username),
 UNIQUE (email)
 );
-insert into user values
-(1, 'admin', 'admin@example.com', '', 'system admin', 'admin user',0, null, ''),
-(2, 'anonymous', 'anonymous@example.com', '', 'anonymous user', 'anonymous user', 1, null, '');
+insert into user (username, email, password, realname, comment, deleted, sysadmin_flag, creation_time, update_time) values
+('admin', 'admin@example.com', '', 'system admin', 'admin user',0, 1, NOW(), NOW()),
+('anonymous', 'anonymous@example.com', '', 'anonymous user', 'anonymous user', 1, 0, NOW(), NOW());
 create table project (
 project_id int NOT NULL AUTO_INCREMENT,
 owner_id int NOT NULL,
 name varchar (30) NOT NULL,
 creation_time timestamp,
+update_time timestamp,
 deleted tinyint (1) DEFAULT 0 NOT NULL,
 public tinyint (1) DEFAULT 0 NOT NULL,
 primary key (project_id),
@@ -64,32 +71,23 @@ create table project (
 UNIQUE (name)
 );
-insert into project values
-(null, 1, 'library', NOW(), 0, 1);
+insert into project (owner_id, name, creation_time, update_time, public) values
+(1, 'library', NOW(), NOW(), 1);
-create table project_role (
-pr_id int NOT NULL AUTO_INCREMENT,
-project_id int NOT NULL,
-role_id int NOT NULL,
-primary key (pr_id),
-FOREIGN KEY (role_id) REFERENCES role(role_id),
-FOREIGN KEY (project_id) REFERENCES project (project_id)
-);
-insert into project_role values
-( 1,1,1 );
-create table user_project_role (
-upr_id int NOT NULL AUTO_INCREMENT,
-user_id int NOT NULL,
-pr_id int NOT NULL,
-primary key (upr_id),
-FOREIGN KEY (user_id) REFERENCES user(user_id),
-FOREIGN KEY (pr_id) REFERENCES project_role (pr_id)
-);
-insert into user_project_role values
-( 1,1,1 );
+create table project_member (
+project_id int NOT NULL,
+user_id int NOT NULL,
+role int NOT NULL,
+creation_time timestamp,
+update_time timestamp,
+PRIMARY KEY (project_id, user_id),
+FOREIGN KEY (role) REFERENCES role(role_id),
+FOREIGN KEY (project_id) REFERENCES project(project_id),
+FOREIGN KEY (user_id) REFERENCES user(user_id)
+);
+insert into project_member (project_id, user_id, role, creation_time, update_time) values
+(1, 1, 1, NOW(), NOW());
 create table access_log (
 log_id int NOT NULL AUTO_INCREMENT,
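Not part of the commit, but a minimal sketch of how the refactored schema above can be read: the single project_member table replaces the old project_role/user_project_role pair, so a membership lookup becomes one join chain. Assuming the seed rows above and auto-increment ids starting at 1, this would return the admin user's projectAdmin role on the library project.
```
-- Illustrative query against the refactored schema shown above (not in the commit).
SELECT u.username,
       p.name AS project_name,
       r.name AS role_name,
       r.role_code
FROM project_member pm
JOIN user u    ON u.user_id    = pm.user_id
JOIN project p ON p.project_id = pm.project_id
JOIN role r    ON r.role_id    = pm.role
WHERE u.username = 'admin'
  AND p.name = 'library';
```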


@@ -1,16 +1,16 @@
 # Installation Guide of Harbor
 ### Download the installation package
-Harbor can be installed from the source code by using "docker-compose up" command, which goes through a full build process. Besides, a pre-built installation package for each release can be downloaded from the [release page](https://github.com/vmware/harbor/releases). This guide describes the installation of Harbor by the using pre-built package.
+Harbor can be installed from the source code by using "docker-compose up" command, which goes through a full build process. Besides, a pre-built installation package of each release can be downloaded from the [release page](https://github.com/vmware/harbor/releases). This guide describes the installation of Harbor by using the pre-built package.
 ### Prerequisites for target machine
 Harbor is deployed as several Docker containers. Hence, it can be deployed on any Linux distribution that supports Docker.
 Before deploying Harbor, the target machine requires Python, Docker, Docker Compose to be installed.
 * Python should be version 2.7 or higher. Some Linux distributions (Gentoo, Arch) may not have a Python interpreter installed by default. On those systems, you need to install Python manually.
 * The Docker engine should be version 1.8 or higher. For the details to install Docker engine, please refer to: https://docs.docker.com/engine/installation/
 * The Docker Compose needs to be version 1.6.0 or higher. For the details to install Docker compose, please refer to: https://docs.docker.com/compose/install/
 ### Configuration of Harbor
-After downloading the package file **```harbor-<version>.tgz```** from release page, you need to extract the package. Before installing Harbor, configure the parameters in the file **harbor.cfg**. Then execute the **prepare** script to generate configuration files for Harbor's containers. Finally, use Docker Compose to start the service.
+After downloading the package file **harbor-<version>.tgz** from the release page, you need to extract files from the package. Before installing Harbor, you should configure the parameters in the file **harbor.cfg**. You then execute the **prepare** script to generate configuration files for Harbor's containers. Finally, you use Docker Compose to start Harbor.
-At minimum, you only need to change the **hostname** attribute in **harbor.cfg** by updating the IP address or fully qualified hostname of your target machine, for example 192.168.1.10. Please see the next section for the description of each parameter.
+At minimum, you only need to change the **hostname** attribute in **harbor.cfg** by updating the IP address or the fully qualified domain name (FQDN) of your target machine, for example 192.168.1.10. Please see the next section for the description of each parameter.
 ```
 $ tar -xzvf harbor-0.1.0.tgz
 $ cd harbor
@@ -29,26 +29,26 @@ After that, you can open a browser and access Harbor via the IP you set in harbo
 ```docker pull 192.168.1.10/library/ubuntu```
 #### Parameters in harbor.cfg
-**hostname**: The endpoint for user to access UI and registry service, for example 192.168.1.10 or exampledomian.com.
+**hostname**: The endpoint for a user to access the user interface and the registry service, for example 192.168.1.10 or exampledomain.com.
-**ui_url_protocol**: The protocol for accessing the UI and token/notification service, by default it is http.
+**ui_url_protocol**: The protocol for accessing the user interface and the token/notification service, by default it is http.
-**Email settings**: the following 5 attributes are used to send an email to reset user's password, it is not mandatory unless password reset function is needed in Harbor.
+**Email settings**: the following 5 attributes are used to send an email to reset a user's password, they are not mandatory unless the password reset function is needed in Harbor.
 * email_server = smtp.mydomain.com
 * email_server_port = 25
 * email_username = sample_admin@mydomain.com
 * email_password = abc
 * email_from = admin <sample_admin@mydomain.com>
-**harbor_admin_password**: The password for administrator of Harbor, by default it is Harbor12345, the user name is admin.
+**harbor_admin_password**: The password for the administrator of Harbor, by default the password is Harbor12345, the user name is admin.
 **auth_mode**: The authentication mode of Harbor. By default it is *db_auth*, i.e. the credentials are stored in a database. Please set it to *ldap_auth* if you want to verify user's credentials against an LDAP server.
 **ldap_url**: The URL for LDAP endpoint, for example ldaps://ldap.mydomain.com. It is only used when **auth_mode** is set to *ldap_auth*.
 **ldap_basedn**: The basedn template for verifying the user's credentials against LDAP, for example uid=%s,ou=people,dc=mydomain,dc=com. It is only used when **auth_mode** is set to *ldap_auth*.
 **db_password**: The password of root user of mySQL database.
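For orientation only (this block is not part of the commit): a minimal harbor.cfg sketch assembled from the example values listed above, assuming the key = value format shown in the email settings. The database password is a placeholder, and the ldap_* lines matter only when auth_mode is ldap_auth.
```
# Hypothetical harbor.cfg excerpt -- values are the examples used in this guide
hostname = 192.168.1.10
ui_url_protocol = http
email_server = smtp.mydomain.com
email_server_port = 25
email_username = sample_admin@mydomain.com
email_password = abc
email_from = admin <sample_admin@mydomain.com>
harbor_admin_password = Harbor12345
auth_mode = db_auth
# Only used when auth_mode = ldap_auth:
# ldap_url = ldaps://ldap.mydomain.com
# ldap_basedn = uid=%s,ou=people,dc=mydomain,dc=com
db_password = <your-mysql-root-password>
```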
 ### Deploy Harbor to a target machine that does not have Internet access
-When you run *docker-compose up* to start Harbor service. It will pull base images from Docker hub and build new images for the containers. This process requires accessing the Internet. If you want to deploy Harbor to a host that is not connected to the Internet, you need to prepare Harbor on a machine that has access to the Internet. After that, you export the images as tgz files and transfer them to the target machine, then load the tgz file into Docker's local image repo.
+When you run *docker-compose up* to start Harbor, it will pull base images from Docker Hub and build new images for the containers. This process requires accessing the Internet. If you want to deploy Harbor to a host that is not connected to the Internet, you need to prepare Harbor on a machine that has access to the Internet. After that, you export the images as tgz files and transfer them to the target machine, then load the tgz file into Docker's local image repo.
 #### Build and save images for offline installation
-On a machine that is connect to Internet, extract the installation package. Then run command "docker-compose build" to build the images and use the script *save_image.sh* to export them as tar files. The tar files will be stored in **images** directory. Next, user can package everything in directory **harbor** into a tgz file and transfer the tgz file to the target machine. This can be done by executing the following commands:
+On a machine that is connected to the Internet, extract files from the installation package. Then run command "docker-compose build" to build the images and use the script *save_image.sh* to export them as tar files. The tar files will be stored in **images** directory. Next, package everything in the directory **harbor** into a tgz file and transfer it to the target machine. This can be done by executing the following commands:
 ```
 $ cd harbor
@@ -69,8 +69,8 @@ $ cd ../
 $ tar -cvzf harbor_offline-0.1.0.tgz harbor
 ```
-The package file **harbor_offline-0.1.0.tgz** contains the images saved by previously steps and the files needed to start Harbor services.
+The package file **harbor_offline-0.1.0.tgz** contains the images saved in the previous steps and the files required to start Harbor.
-Then you can use tools such as scp to transfer the file **harbor_offline-0.1.0.tgz** to the target machine that does not have Internet access. On the target machine, you can execute the following commands to start Harbor service. Again, before running the **prepare** script, be sure to update **harbor.cfg** to reflect the right configuration of the target machine.
+You can use tools such as scp to transfer the file **harbor_offline-0.1.0.tgz** to the target machine that does not have an Internet connection. On the target machine, you can execute the following commands to start Harbor. Again, before running the **prepare** script, be sure to update **harbor.cfg** to reflect the right configuration of the target machine.
 ```
 $ tar -xzvf harbor_offline-0.1.tgz
 $ cd harbor
@@ -98,7 +98,7 @@ $ sudo docker-compose up -d
 ### Manage Harbor's lifecycle
 Harbor is composed of a few containers which are deployed via docker-compose, you can use docker-compose to manage the lifecycle of the containers. Below are a few useful commands:
-Create and start Harbor:
+Build and start Harbor:
 ```
 $ sudo docker-compose up -d
 Creating harbor_log_1
@@ -125,7 +125,7 @@ Starting harbor_registry_1
 Starting harbor_ui_1
 Starting harbor_proxy_1
 ````
-Remove Harbor's containers (the image data and Harbor database files remains on the file system):
+Remove Harbor's containers (the image data and Harbor's database files remain on the file system):
 ```
 $ sudo docker-compose rm
 Going to remove harbor_proxy_1, harbor_ui_1, harbor_registry_1, harbor_mysql_1, harbor_log_1
@@ -135,8 +135,8 @@ Removing harbor_ui_1 ... done
 Removing harbor_registry_1 ... done
 Removing harbor_mysql_1 ... done
 ```
-[Compose command-line reference](https://docs.docker.com/compose/reference/) describes the usage information for the docker-compose subcommands.
+[Docker Compose command-line reference](https://docs.docker.com/compose/reference/) describes the usage information for the docker-compose subcommands.
 ### Persistent data and log files
-By default, data of database and image files in registry are persisted in directory **/data/** of the target machine. When Harbor's containers are removed and recreated the data will remain unchanged.
+By default, the data of the database and the image files in the registry are persisted in the directory **/data/** of the target machine. When Harbor's containers are removed and recreated, the data remain unchanged.
-Harbor leverages rsyslog to collect the logs of each container, by default the log files are stored in directory **/var/log/harbor/** .
+Harbor leverages rsyslog to collect the logs of each container, by default the log files are stored in the directory **/var/log/harbor/** on Harbor's host.